Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)

Merge pull request #87091 from ahg-g/ahg-cleanup

Remove last scheduler dependencies on predicates package

This commit is contained in: 88cf4e5c25
@@ -13,7 +13,6 @@ go_library(
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithmprovider:go_default_library",
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/apis/config/scheme:go_default_library",
@@ -65,7 +64,6 @@ go_test(
        "//pkg/controller/volume/scheduling:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/apis/config/scheme:go_default_library",
        "//pkg/scheduler/apis/extender/v1:go_default_library",
@@ -2,22 +2,17 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "error.go",
        "predicates.go",
    ],
    srcs = ["predicates.go"],
    importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler/framework/plugins/helper:go_default_library",
        "//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library",
        "//pkg/scheduler/framework/plugins/nodename:go_default_library",
        "//pkg/scheduler/framework/plugins/nodeports:go_default_library",
        "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@@ -26,16 +21,14 @@ go_test(
    srcs = ["predicates_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/apis/core:go_default_library",
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/scheduler/framework/plugins/nodename:go_default_library",
        "//pkg/scheduler/framework/plugins/nodeports:go_default_library",
        "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
    ],
)
@@ -1,121 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicates

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

var (
    // The predicateName tries to be consistent as the predicate name used in DefaultAlgorithmProvider defined in
    // defaults.go (which tend to be stable for backward compatibility)

    // NOTE: If you add a new predicate failure error for a predicate that can never
    // be made to pass by removing pods, or you change an existing predicate so that
    // it can never be made to pass by removing pods, you need to add the predicate
    // failure error in nodesWherePreemptionMightHelp() in scheduler/core/generic_scheduler.go

    // ErrNodeSelectorNotMatch is used for MatchNodeSelector predicate error.
    ErrNodeSelectorNotMatch = NewPredicateFailureError("MatchNodeSelector", "node(s) didn't match node selector")
    // ErrPodNotMatchHostName is used for HostName predicate error.
    ErrPodNotMatchHostName = NewPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
    // ErrPodNotFitsHostPorts is used for PodFitsHostPorts predicate error.
    ErrPodNotFitsHostPorts = NewPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
    // ErrNodeUnknownCondition is used for NodeUnknownCondition predicate error.
    ErrNodeUnknownCondition = NewPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
)

var unresolvablePredicateFailureErrors = map[PredicateFailureReason]struct{}{
    ErrNodeSelectorNotMatch: {},
    ErrPodNotMatchHostName:  {},
    // Node conditions won't change when scheduler simulates removal of preemption victims.
    // So, it is pointless to try nodes that have not been able to host the pod due to node
    // conditions.
    ErrNodeUnknownCondition: {},
}

// UnresolvablePredicateExists checks if there is at least one unresolvable predicate failure reason.
func UnresolvablePredicateExists(reasons []PredicateFailureReason) bool {
    for _, r := range reasons {
        if _, ok := unresolvablePredicateFailureErrors[r]; ok {
            return true
        }
    }
    return false
}

// InsufficientResourceError is an error type that indicates what kind of resource limit is
// hit and caused the unfitting failure.
type InsufficientResourceError struct {
    // resourceName is the name of the resource that is insufficient
    ResourceName v1.ResourceName
    requested    int64
    used         int64
    capacity     int64
}

// NewInsufficientResourceError returns an InsufficientResourceError.
func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError {
    return &InsufficientResourceError{
        ResourceName: resourceName,
        requested:    requested,
        used:         used,
        capacity:     capacity,
    }
}

func (e *InsufficientResourceError) Error() string {
    return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d",
        e.ResourceName, e.requested, e.used, e.capacity)
}

// GetReason returns the reason of the InsufficientResourceError.
func (e *InsufficientResourceError) GetReason() string {
    return fmt.Sprintf("Insufficient %v", e.ResourceName)
}

// GetInsufficientAmount returns the amount of the insufficient resource of the error.
func (e *InsufficientResourceError) GetInsufficientAmount() int64 {
    return e.requested - (e.capacity - e.used)
}

// PredicateFailureError describes a failure error of predicate.
type PredicateFailureError struct {
    PredicateName string
    PredicateDesc string
}

// NewPredicateFailureError creates a PredicateFailureError with message.
func NewPredicateFailureError(predicateName, predicateDesc string) *PredicateFailureError {
    return &PredicateFailureError{PredicateName: predicateName, PredicateDesc: predicateDesc}
}

func (e *PredicateFailureError) Error() string {
    return fmt.Sprintf("Predicate %s failed", e.PredicateName)
}

// GetReason returns the reason of the PredicateFailureError.
func (e *PredicateFailureError) GetReason() string {
    return e.PredicateDesc
}

// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
    GetReason() string
}
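For context, a small hypothetical sketch (not part of this commit) of how callers consumed the helpers deleted above: predicates reported failures as PredicateFailureReason values, and the preemption path used UnresolvablePredicateExists to skip nodes whose failures cannot be fixed by evicting pods. The numbers and pod/node setup below are invented for illustration.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
)

func main() {
    // Two illustrative failure reasons built with the now-deleted constructors.
    reasons := []predicates.PredicateFailureReason{
        predicates.ErrNodeSelectorNotMatch,
        predicates.NewInsufficientResourceError(v1.ResourceCPU, 2000, 7000, 8000),
    }
    // A node-selector mismatch cannot be resolved by removing pods, so
    // preemption would skip this node.
    if predicates.UnresolvablePredicateExists(reasons) {
        fmt.Println("preemption cannot help on this node")
    }
    for _, r := range reasons {
        fmt.Println(r.GetReason())
    }
}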
@@ -19,291 +19,78 @@ package predicates

import (
    "fmt"

    "k8s.io/klog"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
    "k8s.io/kubernetes/pkg/features"
    pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
    schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

const (
    // MatchInterPodAffinityPred defines the name of predicate MatchInterPodAffinity.
    MatchInterPodAffinityPred = "MatchInterPodAffinity"
    // CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding.
    CheckVolumeBindingPred = "CheckVolumeBinding"
    // GeneralPred defines the name of predicate GeneralPredicates.
    GeneralPred = "GeneralPredicates"
    // HostNamePred defines the name of predicate HostName.
    HostNamePred = "HostName"
    // PodFitsHostPortsPred defines the name of predicate PodFitsHostPorts.
    PodFitsHostPortsPred = "PodFitsHostPorts"
    // MatchNodeSelectorPred defines the name of predicate MatchNodeSelector.
    MatchNodeSelectorPred = "MatchNodeSelector"
    // PodFitsResourcesPred defines the name of predicate PodFitsResources.
    PodFitsResourcesPred = "PodFitsResources"
    // NoDiskConflictPred defines the name of predicate NoDiskConflict.
    NoDiskConflictPred = "NoDiskConflict"
    // PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
    PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
    // CheckNodeUnschedulablePred defines the name of predicate CheckNodeUnschedulablePredicate.
    CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
    // CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
    CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
    // CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
    CheckServiceAffinityPred = "CheckServiceAffinity"
    // MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
    // MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
    // MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
    // MaxCinderVolumeCountPred defines the name of predicate MaxCinderDiskVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxCinderVolumeCountPred = "MaxCinderVolumeCount"
    // MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached.
    MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
    // NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
    NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
    // EvenPodsSpreadPred defines the name of predicate EvenPodsSpread.
    EvenPodsSpreadPred = "EvenPodsSpread"
)
// DEPRECATED: all the logic in this package exist only because kubelet uses it.

// IMPORTANT NOTE for predicate developers:
// We are using cached predicate result for pods belonging to the same equivalence class.
// So when updating an existing predicate, you should consider whether your change will introduce new
// dependency to attributes of any API object like Pod, Node, Service etc.
// If yes, you are expected to invalidate the cached predicate result for related API object change.
// For example:
// https://github.com/kubernetes/kubernetes/blob/36a218e/plugin/pkg/scheduler/factory/factory.go#L422

// IMPORTANT NOTE: this list contains the ordering of the predicates, if you develop a new predicate
// it is mandatory to add its name to this list.
// Otherwise it won't be processed, see generic_scheduler#podFitsOnNode().
// The order is based on the restrictiveness & complexity of predicates.
// Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md
var (
    predicatesOrdering = []string{CheckNodeUnschedulablePred,
        GeneralPred, HostNamePred, PodFitsHostPortsPred,
        MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
        PodToleratesNodeTaintsPred, CheckNodeLabelPresencePred,
        CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
        MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
        EvenPodsSpreadPred, MatchInterPodAffinityPred}
)

// Ordering returns the ordering of predicates.
func Ordering() []string {
    return predicatesOrdering
// InsufficientResourceError is an error type that indicates what kind of resource limit is
// hit and caused the unfitting failure.
type InsufficientResourceError struct {
    noderesources.InsufficientResource
}

// Metadata interface represents anything that can access a predicate metadata.
// DEPRECATED.
type Metadata interface{}

// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
type FitPredicate func(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)

// GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously.
//
// If Pod Overhead is specified and the feature gate is set, the resources defined for Overhead
// are added to the calculated Resource request sum
//
// Example:
//
// Pod:
//   InitContainers
//     IC1:
//       CPU: 2
//       Memory: 1G
//     IC2:
//       CPU: 2
//       Memory: 3G
//   Containers
//     C1:
//       CPU: 2
//       Memory: 1G
//     C2:
//       CPU: 1
//       Memory: 1G
//
// Result: CPU: 3, Memory: 3G
func GetResourceRequest(pod *v1.Pod) *schedulernodeinfo.Resource {
    result := &schedulernodeinfo.Resource{}
    for _, container := range pod.Spec.Containers {
        result.Add(container.Resources.Requests)
    }

    // take max_resource(sum_pod, any_init_container)
    for _, container := range pod.Spec.InitContainers {
        result.SetMaxResource(container.Resources.Requests)
    }

    // If Overhead is being utilized, add to the total requests for the pod
    if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
        result.Add(pod.Spec.Overhead)
    }

    return result
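As a quick check on the arithmetic in the GetResourceRequest comment above, here is a standalone sketch (plain Go, no scheduler types; the res struct is made up for illustration) of the sum-then-max rule: regular containers are summed (2+1 CPU, 1G+1G memory) and each init container then raises the per-dimension maximum, giving CPU: 3, Memory: 3G.

package main

import "fmt"

func main() {
    type res struct{ cpu, memGi int64 }
    initContainers := []res{{2, 1}, {2, 3}} // IC1, IC2
    containers := []res{{2, 1}, {1, 1}}     // C1, C2

    var total res
    // Regular containers run simultaneously, so their requests are summed.
    for _, c := range containers {
        total.cpu += c.cpu
        total.memGi += c.memGi
    }
    // Init containers run sequentially, so each only raises the per-dimension max.
    for _, ic := range initContainers {
        if ic.cpu > total.cpu {
            total.cpu = ic.cpu
        }
        if ic.memGi > total.memGi {
            total.memGi = ic.memGi
        }
    }
    fmt.Printf("CPU: %d, Memory: %dG\n", total.cpu, total.memGi) // prints "CPU: 3, Memory: 3G"
}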
func (e *InsufficientResourceError) Error() string {
    return fmt.Sprintf("Node didn't have enough resource: %s, requested: %d, used: %d, capacity: %d",
        e.ResourceName, e.Requested, e.Used, e.Capacity)
}

func podName(pod *v1.Pod) string {
    return pod.Namespace + "/" + pod.Name
// PredicateFailureReason interface represents the failure reason of a predicate.
type PredicateFailureReason interface {
    GetReason() string
}

// PodFitsResources is a wrapper around PodFitsResourcesPredicate that implements FitPredicate interface.
// TODO(#85822): remove this function once predicate registration logic is deleted.
func PodFitsResources(pod *v1.Pod, _ Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    return PodFitsResourcesPredicate(pod, nil, nil, nodeInfo)
// GetReason returns the reason of the InsufficientResourceError.
func (e *InsufficientResourceError) GetReason() string {
    return fmt.Sprintf("Insufficient %v", e.ResourceName)
}

// PodFitsResourcesPredicate checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
// First return value indicates whether a node has sufficient resources to run a pod while the second return value indicates the
// predicate failure reasons if the node has insufficient resources to run the pod
func PodFitsResourcesPredicate(pod *v1.Pod, podRequest *schedulernodeinfo.Resource, ignoredExtendedResources sets.String, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    node := nodeInfo.Node()
    if node == nil {
        return false, nil, fmt.Errorf("node not found")
    }

    var predicateFails []PredicateFailureReason
    allowedPodNumber := nodeInfo.AllowedPodNumber()
    if len(nodeInfo.Pods())+1 > allowedPodNumber {
        predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
    }

    if ignoredExtendedResources == nil {
        ignoredExtendedResources = sets.NewString()
    }

    if podRequest == nil {
        podRequest = GetResourceRequest(pod)
    }
    if podRequest.MilliCPU == 0 &&
        podRequest.Memory == 0 &&
        podRequest.EphemeralStorage == 0 &&
        len(podRequest.ScalarResources) == 0 {
        return len(predicateFails) == 0, predicateFails, nil
    }

    allocatable := nodeInfo.AllocatableResource()
    if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
        predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
    }
    if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
        predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
    }
    if allocatable.EphemeralStorage < podRequest.EphemeralStorage+nodeInfo.RequestedResource().EphemeralStorage {
        predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage))
    }

    for rName, rQuant := range podRequest.ScalarResources {
        if v1helper.IsExtendedResourceName(rName) {
            // If this resource is one of the extended resources that should be
            // ignored, we will skip checking it.
            if ignoredExtendedResources.Has(string(rName)) {
                continue
            }
        }
        if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] {
            predicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName]))
        }
    }

    if klog.V(10) && len(predicateFails) == 0 {
        // We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is
        // not logged. There is visible performance gain from it.
        klog.Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.",
            podName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)
    }
    return len(predicateFails) == 0, predicateFails, nil
// GetInsufficientAmount returns the amount of the insufficient resource of the error.
func (e *InsufficientResourceError) GetInsufficientAmount() int64 {
    return e.Requested - (e.Capacity - e.Used)
}
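To make GetInsufficientAmount concrete (illustrative numbers, not taken from the diff): with Requested = 1000, Used = 3500 and Capacity = 4000, the node has 4000 - 3500 = 500 left, so the reported shortfall is 1000 - (4000 - 3500) = 500.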
// PodMatchNodeSelector checks if a pod node selector matches the node label.
func PodMatchNodeSelector(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    node := nodeInfo.Node()
    if node == nil {
        return false, nil, fmt.Errorf("node not found")
    }
    if pluginhelper.PodMatchesNodeSelectorAndAffinityTerms(pod, node) {
        return true, nil, nil
    }
    return false, []PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
// PredicateFailureError describes a failure error of predicate.
type PredicateFailureError struct {
    PredicateName string
    PredicateDesc string
}

// PodFitsHost checks if a pod spec node name matches the current node.
func PodFitsHost(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    if len(pod.Spec.NodeName) == 0 {
        return true, nil, nil
    }
    node := nodeInfo.Node()
    if node == nil {
        return false, nil, fmt.Errorf("node not found")
    }
    if pod.Spec.NodeName == node.Name {
        return true, nil, nil
    }
    return false, []PredicateFailureReason{ErrPodNotMatchHostName}, nil
func (e *PredicateFailureError) Error() string {
    return fmt.Sprintf("Predicate %s failed", e.PredicateName)
}

// PodFitsHostPorts is a wrapper around PodFitsHostPortsPredicate. This is needed until
// we are able to get rid of the FitPredicate function signature.
// TODO(#85822): remove this function once predicate registration logic is deleted.
func PodFitsHostPorts(pod *v1.Pod, _ Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    return PodFitsHostPortsPredicate(pod, nil, nodeInfo)
}

// PodFitsHostPortsPredicate checks if a node has free ports for the requested pod ports.
func PodFitsHostPortsPredicate(pod *v1.Pod, meta []*v1.ContainerPort, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    wantPorts := meta
    if wantPorts == nil {
        // Fallback to computing it.
        wantPorts = schedutil.GetContainerPorts(pod)
    }
    if len(wantPorts) == 0 {
        return true, nil, nil
    }

    existingPorts := nodeInfo.UsedPorts()

    // try to see whether existingPorts and wantPorts will conflict or not
    for _, cp := range wantPorts {
        if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) {
            return false, []PredicateFailureReason{ErrPodNotFitsHostPorts}, nil
        }
    }

    return true, nil, nil
// GetReason returns the reason of the PredicateFailureError.
func (e *PredicateFailureError) GetReason() string {
    return e.PredicateDesc
}

// GeneralPredicates checks a group of predicates that the kubelet cares about.
// DEPRECATED: this exist only because kubelet uses it. We should change kubelet to execute the individual predicates it requires.
func GeneralPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    var predicateFails []PredicateFailureReason
    for _, predicate := range []FitPredicate{PodFitsResources, PodFitsHost, PodFitsHostPorts, PodMatchNodeSelector} {
        fit, reasons, err := predicate(pod, meta, nodeInfo)
        if err != nil {
            return false, predicateFails, err
        }
        if !fit {
            predicateFails = append(predicateFails, reasons...)
        }
func GeneralPredicates(pod *v1.Pod, _ interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    if nodeInfo.Node() == nil {
        return false, nil, fmt.Errorf("node not found")
    }

    return len(predicateFails) == 0, predicateFails, nil
    var reasons []PredicateFailureReason
    for _, r := range noderesources.Fits(pod, nodeInfo, nil) {
        reasons = append(reasons, &InsufficientResourceError{InsufficientResource: r})
    }

    if !pluginhelper.PodMatchesNodeSelectorAndAffinityTerms(pod, nodeInfo.Node()) {
        reasons = append(reasons, &PredicateFailureError{nodeaffinity.Name, nodeaffinity.ErrReason})
    }
    if !nodename.Fits(pod, nodeInfo) {
        reasons = append(reasons, &PredicateFailureError{nodename.Name, nodename.ErrReason})
    }
    if !nodeports.Fits(pod, nodeInfo) {
        reasons = append(reasons, &PredicateFailureError{nodeports.Name, nodeports.ErrReason})
    }

    return len(reasons) == 0, reasons, nil
}
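The rewritten GeneralPredicates above keeps the legacy signature but delegates to the framework plugin helpers (noderesources.Fits, nodename.Fits, nodeports.Fits and the node-affinity helper), wrapping any failures back into the legacy reason types. A hypothetical kubelet-style caller would keep working unchanged; the admitPod helper and its package below are made up for illustration:

package admission // hypothetical package, for illustration only

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// admitPod rejects a pod that does not fit the node, reporting the legacy
// failure reasons produced by the plugin-backed GeneralPredicates.
func admitPod(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) error {
    fit, reasons, err := predicates.GeneralPredicates(pod, nil, nodeInfo)
    if err != nil {
        return err
    }
    if !fit {
        msgs := make([]string, 0, len(reasons))
        for _, r := range reasons {
            msgs = append(msgs, r.GetReason())
        }
        return fmt.Errorf("pod %s/%s does not fit: %v", pod.Namespace, pod.Name, msgs)
    }
    return nil
}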
(File diff suppressed because it is too large.)
@@ -47,12 +47,13 @@ go_test(
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/scheduler/algorithm:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/apis/extender/v1:go_default_library",
        "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library",
        "//pkg/scheduler/framework/plugins/interpodaffinity:go_default_library",
        "//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library",
        "//pkg/scheduler/framework/plugins/nodelabel:go_default_library",
        "//pkg/scheduler/framework/plugins/nodename:go_default_library",
        "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
        "//pkg/scheduler/framework/plugins/nodeunschedulable:go_default_library",
        "//pkg/scheduler/framework/plugins/podtopologyspread:go_default_library",
@@ -37,12 +37,13 @@ import (
    "k8s.io/client-go/informers"
    clientsetfake "k8s.io/client-go/kubernetes/fake"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    algorithmpredicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
    extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
@@ -1852,8 +1853,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
        {
            name: "No node should be attempted",
            nodesStatuses: framework.NodeToStatusMap{
                "machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeSelectorNotMatch.GetReason()),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
                "machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeaffinity.ErrReason),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
                "machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
                "machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodelabel.ErrReasonPresenceViolated),
            },
@@ -1863,7 +1864,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
            name: "ErrReasonAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
            nodesStatuses: framework.NodeToStatusMap{
                "machine1": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAffinityNotMatch),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
                "machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnschedulable),
            },
            expected: map[string]bool{"machine1": true, "machine4": true},
@@ -1872,7 +1873,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
            name: "pod with both pod affinity and anti-affinity should be tried",
            nodesStatuses: framework.NodeToStatusMap{
                "machine1": framework.NewStatus(framework.Unschedulable, interpodaffinity.ErrReasonAffinityNotMatch),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
            },
            expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
        },
@@ -1888,7 +1889,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
            name: "Mix of failed predicates works fine",
            nodesStatuses: framework.NodeToStatusMap{
                "machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
                "machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
                "machine2": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
            },
            expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
        },
@@ -1912,7 +1913,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
            name: "ErrTopologySpreadConstraintsNotMatch should be tried as it indicates that the pod is unschedulable due to topology spread constraints",
            nodesStatuses: framework.NodeToStatusMap{
                "machine1": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
                "machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
                "machine3": framework.NewStatus(framework.Unschedulable, podtopologyspread.ErrReasonConstraintsNotMatch),
            },
            expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
@@ -1955,9 +1956,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {

func TestPreempt(t *testing.T) {
    defaultFailedNodeToStatusMap := framework.NodeToStatusMap{
        "machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300).GetReason()),
        "machine1": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
        "machine2": framework.NewStatus(framework.Unschedulable, volumerestrictions.ErrReasonDiskConflict),
        "machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
        "machine3": framework.NewStatus(framework.Unschedulable, fmt.Sprintf("Insufficient %v", v1.ResourceMemory)),
    }
    // Prepare 3 node names.
    var defaultNodeNames []string
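The test table above leans on the two framework failure codes: Unschedulable failures might be resolved by preempting victim pods, while UnschedulableAndUnresolvable failures cannot, so preemption skips those nodes. A simplified, hypothetical sketch of that decision (the real logic is nodesWherePreemptionMightHelp in generic_scheduler.go; assumes the framework package imported as in the test file above):

// nodesWorthTrying is an illustrative helper, not code from this commit.
func nodesWorthTrying(statuses framework.NodeToStatusMap) map[string]bool {
    candidates := map[string]bool{}
    for name, status := range statuses {
        // Nodes with no recorded failure, or with a resolvable failure, remain
        // candidates for preemption.
        if status == nil || status.Code() != framework.UnschedulableAndUnresolvable {
            candidates[name] = true
        }
    }
    return candidates
}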
@@ -39,12 +39,11 @@ import (
    "k8s.io/klog"
    kubefeatures "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/algorithm"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
    "k8s.io/kubernetes/pkg/scheduler/core"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
    frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -194,8 +193,8 @@ func (c *Configurator) createFromProvider(providerName string) (*Scheduler, erro

// CreateFromConfig creates a scheduler from the configuration file
func (c *Configurator) createFromConfig(policy schedulerapi.Policy) (*Scheduler, error) {
    lr := plugins.NewLegacyRegistry()
    args := &plugins.ConfigProducerArgs{}
    lr := frameworkplugins.NewLegacyRegistry()
    args := &frameworkplugins.ConfigProducerArgs{}

    klog.V(2).Infof("Creating scheduler from configuration: %v", policy)

@@ -221,7 +220,7 @@ func (c *Configurator) createFromConfig(policy schedulerapi.Policy) (*Scheduler,
        priorityKeys = lr.DefaultPriorities
    } else {
        for _, priority := range policy.Priorities {
            if priority.Name == plugins.EqualPriority {
            if priority.Name == frameworkplugins.EqualPriority {
                klog.V(2).Infof("Skip registering priority: %s", priority.Name)
                continue
            }
@@ -309,7 +308,7 @@ func (c *Configurator) createFromConfig(policy schedulerapi.Policy) (*Scheduler,
// getPriorityConfigs returns priorities configuration: ones that will run as priorities and ones that will run
// as framework plugins. Specifically, a priority will run as a framework plugin if a plugin config producer was
// registered for that priority.
func getPriorityConfigs(keys map[string]int64, lr *plugins.LegacyRegistry, args *plugins.ConfigProducerArgs) (*schedulerapi.Plugins, []schedulerapi.PluginConfig, error) {
func getPriorityConfigs(keys map[string]int64, lr *frameworkplugins.LegacyRegistry, args *frameworkplugins.ConfigProducerArgs) (*schedulerapi.Plugins, []schedulerapi.PluginConfig, error) {
    var plugins schedulerapi.Plugins
    var pluginConfig []schedulerapi.PluginConfig

@@ -340,7 +339,7 @@ func getPriorityConfigs(keys map[string]int64, lr *plugins.LegacyRegistry, args
// registered for that predicate.
// Note that the framework executes plugins according to their order in the Plugins list, and so predicates run as plugins
// are added to the Plugins list according to the order specified in predicates.Ordering().
func getPredicateConfigs(keys sets.String, lr *plugins.LegacyRegistry, args *plugins.ConfigProducerArgs) (*schedulerapi.Plugins, []schedulerapi.PluginConfig, error) {
func getPredicateConfigs(keys sets.String, lr *frameworkplugins.LegacyRegistry, args *frameworkplugins.ConfigProducerArgs) (*schedulerapi.Plugins, []schedulerapi.PluginConfig, error) {
    allPredicates := keys.Union(lr.MandatoryPredicates)

    // Create the framework plugin configurations, and place them in the order
@@ -348,7 +347,7 @@ func getPredicateConfigs(keys sets.String, lr *plugins.LegacyRegistry, args *plu
    var plugins schedulerapi.Plugins
    var pluginConfig []schedulerapi.PluginConfig

    for _, predicateKey := range predicates.Ordering() {
    for _, predicateKey := range frameworkplugins.PredicateOrdering() {
        if allPredicates.Has(predicateKey) {
            producer, exist := lr.PredicateToConfigProducer[predicateKey]
            if !exist {
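The getPredicateConfigs loop above walks the legacy predicate ordering and looks up a config producer for each enabled key, so the resulting framework Filter plugins run in the same order the predicates used to. A simplified, self-contained sketch of that pattern follows; every name in it is a hypothetical stand-in, not the real scheduler type.

package main

import "fmt"

type configProducer func() string // stand-in for the real ConfigProducer

// buildInOrder walks the ordering and resolves each enabled key through its
// registered producer, preserving the legacy predicate order.
func buildInOrder(ordering []string, enabled map[string]bool, producers map[string]configProducer) ([]string, error) {
    var out []string
    for _, key := range ordering {
        if !enabled[key] {
            continue
        }
        producer, ok := producers[key]
        if !ok {
            return nil, fmt.Errorf("no config producer registered for predicate %q", key)
        }
        out = append(out, producer())
    }
    return out, nil
}

func main() {
    ordering := []string{"CheckNodeUnschedulable", "GeneralPredicates", "PodFitsHostPorts"}
    enabled := map[string]bool{"GeneralPredicates": true, "PodFitsHostPorts": true}
    producers := map[string]configProducer{
        "GeneralPredicates": func() string { return "NodeResourcesFit" },
        "PodFitsHostPorts":  func() string { return "NodePorts" },
    }
    plugins, err := buildInOrder(ordering, enabled, producers)
    fmt.Println(plugins, err) // [NodeResourcesFit NodePorts] <nil>
}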
@@ -10,7 +10,6 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/features:go_default_library",
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library",
        "//pkg/scheduler/framework/plugins/imagelocality:go_default_library",
@@ -54,7 +53,6 @@ filegroup(
        "//pkg/scheduler/framework/plugins/helper:all-srcs",
        "//pkg/scheduler/framework/plugins/imagelocality:all-srcs",
        "//pkg/scheduler/framework/plugins/interpodaffinity:all-srcs",
        "//pkg/scheduler/framework/plugins/migration:all-srcs",
        "//pkg/scheduler/framework/plugins/nodeaffinity:all-srcs",
        "//pkg/scheduler/framework/plugins/nodelabel:all-srcs",
        "//pkg/scheduler/framework/plugins/nodename:all-srcs",
@@ -1634,6 +1634,124 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {
    }
}

func TestPreFilterDisabled(t *testing.T) {
    labelRgChina := map[string]string{
        "region": "China",
    }
    labelRgChinaAzAz1 := map[string]string{
        "region": "China",
        "az": "az1",
    }
    labelRgIndia := map[string]string{
        "region": "India",
    }
    labelRgUS := map[string]string{
        "region": "US",
    }

    tests := []struct {
        pod *v1.Pod
        pods []*v1.Pod
        nodes []*v1.Node
        wantStatuses []*framework.Status
        name string
    }{
        {
            pod: &v1.Pod{
                ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "123"}},
                Spec: v1.PodSpec{
                    Affinity: &v1.Affinity{
                        PodAntiAffinity: &v1.PodAntiAffinity{
                            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                                {
                                    LabelSelector: &metav1.LabelSelector{
                                        MatchExpressions: []metav1.LabelSelectorRequirement{
                                            {
                                                Key: "foo",
                                                Operator: metav1.LabelSelectorOpIn,
                                                Values: []string{"bar"},
                                            },
                                        },
                                    },
                                    TopologyKey: "region",
                                },
                            },
                        },
                    },
                },
            },
            pods: []*v1.Pod{
                {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}},
                {
                    Spec: v1.PodSpec{
                        NodeName: "nodeC",
                        Affinity: &v1.Affinity{
                            PodAntiAffinity: &v1.PodAntiAffinity{
                                RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
                                    {
                                        LabelSelector: &metav1.LabelSelector{
                                            MatchExpressions: []metav1.LabelSelectorRequirement{
                                                {
                                                    Key: "foo",
                                                    Operator: metav1.LabelSelectorOpIn,
                                                    Values: []string{"123"},
                                                },
                                            },
                                        },
                                        TopologyKey: "region",
                                    },
                                },
                            },
                        },
                    },
                },
            },
            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}},
            },
            wantStatuses: []*framework.Status{
                framework.NewStatus(
                    framework.Unschedulable,
                    ErrReasonAffinityNotMatch,
                    ErrReasonAntiAffinityRulesNotMatch,
                ),
                framework.NewStatus(
                    framework.Unschedulable,
                    ErrReasonAffinityNotMatch,
                    ErrReasonAntiAffinityRulesNotMatch,
                ),
                framework.NewStatus(
                    framework.Unschedulable,
                    ErrReasonAffinityNotMatch,
                    ErrReasonExistingAntiAffinityRulesNotMatch,
                ),
                nil,
            },
            name: "NodeA and nodeB have same topologyKey and label value. NodeA has an existing pod that matches the inter pod affinity rule. NodeC has an existing pod that match the inter pod affinity rule. The pod can not be scheduled onto nodeA, nodeB and nodeC but can be schedulerd onto nodeD",
        },
    }

    for indexTest, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
            for indexNode, node := range test.nodes {
                p := &InterPodAffinity{
                    sharedLister: snapshot,
                }
                state := framework.NewCycleState()
                nodeInfo := mustGetNodeInfo(t, snapshot, node.Name)
                gotStatus := p.Filter(context.Background(), state, test.pod, nodeInfo)
                if !reflect.DeepEqual(gotStatus, test.wantStatuses[indexNode]) {
                    t.Errorf("index: %d status does not match: %v, want: %v", indexTest, gotStatus, test.wantStatuses[indexNode])
                }
            }
        })
    }
}

func TestPreFilterStateAddRemovePod(t *testing.T) {
    var label1 = map[string]string{
        "region": "r1",
@@ -25,7 +25,6 @@ import (
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
@@ -86,6 +85,66 @@ const (
    EvenPodsSpreadPriority = "EvenPodsSpreadPriority"
)

const (
    // MatchInterPodAffinityPred defines the name of predicate MatchInterPodAffinity.
    MatchInterPodAffinityPred = "MatchInterPodAffinity"
    // CheckVolumeBindingPred defines the name of predicate CheckVolumeBinding.
    CheckVolumeBindingPred = "CheckVolumeBinding"
    // GeneralPred defines the name of predicate GeneralPredicates.
    GeneralPred = "GeneralPredicates"
    // HostNamePred defines the name of predicate HostName.
    HostNamePred = "HostName"
    // PodFitsHostPortsPred defines the name of predicate PodFitsHostPorts.
    PodFitsHostPortsPred = "PodFitsHostPorts"
    // MatchNodeSelectorPred defines the name of predicate MatchNodeSelector.
    MatchNodeSelectorPred = "MatchNodeSelector"
    // PodFitsResourcesPred defines the name of predicate PodFitsResources.
    PodFitsResourcesPred = "PodFitsResources"
    // NoDiskConflictPred defines the name of predicate NoDiskConflict.
    NoDiskConflictPred = "NoDiskConflict"
    // PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
    PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
    // CheckNodeUnschedulablePred defines the name of predicate CheckNodeUnschedulablePredicate.
    CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
    // CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
    CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
    // CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
    CheckServiceAffinityPred = "CheckServiceAffinity"
    // MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxEBSVolumeCountPred = "MaxEBSVolumeCount"
    // MaxGCEPDVolumeCountPred defines the name of predicate MaxGCEPDVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxGCEPDVolumeCountPred = "MaxGCEPDVolumeCount"
    // MaxAzureDiskVolumeCountPred defines the name of predicate MaxAzureDiskVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxAzureDiskVolumeCountPred = "MaxAzureDiskVolumeCount"
    // MaxCinderVolumeCountPred defines the name of predicate MaxCinderDiskVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.
    MaxCinderVolumeCountPred = "MaxCinderVolumeCount"
    // MaxCSIVolumeCountPred defines the predicate that decides how many CSI volumes should be attached.
    MaxCSIVolumeCountPred = "MaxCSIVolumeCountPred"
    // NoVolumeZoneConflictPred defines the name of predicate NoVolumeZoneConflict.
    NoVolumeZoneConflictPred = "NoVolumeZoneConflict"
    // EvenPodsSpreadPred defines the name of predicate EvenPodsSpread.
    EvenPodsSpreadPred = "EvenPodsSpread"
)

// PredicateOrdering returns the ordering of predicate execution.
func PredicateOrdering() []string {
    return []string{CheckNodeUnschedulablePred,
        GeneralPred, HostNamePred, PodFitsHostPortsPred,
        MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
        PodToleratesNodeTaintsPred, CheckNodeLabelPresencePred,
        CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
        MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
        EvenPodsSpreadPred, MatchInterPodAffinityPred}
}

// LegacyRegistry is used to store current state of registered predicates and priorities.
|
||||
type LegacyRegistry struct {
|
||||
// maps that associate predicates/priorities with framework plugin configurations.
|
||||
@ -127,23 +186,23 @@ func NewLegacyRegistry() *LegacyRegistry {
|
||||
// MandatoryPredicates the set of keys for predicates that the scheduler will
|
||||
// be configured with all the time.
|
||||
MandatoryPredicates: sets.NewString(
|
||||
predicates.PodToleratesNodeTaintsPred,
|
||||
predicates.CheckNodeUnschedulablePred,
|
||||
PodToleratesNodeTaintsPred,
|
||||
CheckNodeUnschedulablePred,
|
||||
),
|
||||
|
||||
// Used as the default set of predicates if Policy was specified, but predicates was nil.
|
||||
DefaultPredicates: sets.NewString(
|
||||
predicates.NoVolumeZoneConflictPred,
|
||||
predicates.MaxEBSVolumeCountPred,
|
||||
predicates.MaxGCEPDVolumeCountPred,
|
||||
predicates.MaxAzureDiskVolumeCountPred,
|
||||
predicates.MaxCSIVolumeCountPred,
|
||||
predicates.MatchInterPodAffinityPred,
|
||||
predicates.NoDiskConflictPred,
|
||||
predicates.GeneralPred,
|
||||
predicates.PodToleratesNodeTaintsPred,
|
||||
predicates.CheckVolumeBindingPred,
|
||||
predicates.CheckNodeUnschedulablePred,
|
||||
NoVolumeZoneConflictPred,
|
||||
MaxEBSVolumeCountPred,
|
||||
MaxGCEPDVolumeCountPred,
|
||||
MaxAzureDiskVolumeCountPred,
|
||||
MaxCSIVolumeCountPred,
|
||||
MatchInterPodAffinityPred,
|
||||
NoDiskConflictPred,
|
||||
GeneralPred,
|
||||
PodToleratesNodeTaintsPred,
|
||||
CheckVolumeBindingPred,
|
||||
CheckNodeUnschedulablePred,
|
||||
),
|
||||
|
||||
// Used as the default set of predicates if Policy was specified, but priorities was nil.
|
||||
@ -162,7 +221,7 @@ func NewLegacyRegistry() *LegacyRegistry {
|
||||
PriorityToConfigProducer: make(map[string]ConfigProducer),
|
||||
}
|
||||
|
||||
registry.registerPredicateConfigProducer(predicates.GeneralPred,
|
||||
registry.registerPredicateConfigProducer(GeneralPred,
|
||||
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
// GeneralPredicate is a combination of predicates.
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, noderesources.FitName, nil)
|
||||
@ -174,92 +233,92 @@ func NewLegacyRegistry() *LegacyRegistry {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, nodeaffinity.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.PodToleratesNodeTaintsPred,
|
||||
registry.registerPredicateConfigProducer(PodToleratesNodeTaintsPred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, tainttoleration.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.PodFitsResourcesPred,
|
||||
registry.registerPredicateConfigProducer(PodFitsResourcesPred,
|
||||
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, noderesources.FitName, nil)
|
||||
plugins.PreFilter = appendToPluginSet(plugins.PreFilter, noderesources.FitName, nil)
|
||||
pluginConfig = append(pluginConfig, makePluginConfig(noderesources.FitName, args.NodeResourcesFitArgs))
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.HostNamePred,
|
||||
registry.registerPredicateConfigProducer(HostNamePred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.PodFitsHostPortsPred,
|
||||
registry.registerPredicateConfigProducer(PodFitsHostPortsPred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, nodeports.Name, nil)
|
||||
plugins.PreFilter = appendToPluginSet(plugins.PreFilter, nodeports.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.MatchNodeSelectorPred,
|
||||
registry.registerPredicateConfigProducer(MatchNodeSelectorPred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, nodeaffinity.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.CheckNodeUnschedulablePred,
|
||||
registry.registerPredicateConfigProducer(CheckNodeUnschedulablePred,
|
||||
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, nodeunschedulable.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.CheckVolumeBindingPred,
|
||||
registry.registerPredicateConfigProducer(CheckVolumeBindingPred,
|
||||
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, volumebinding.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.NoDiskConflictPred,
|
||||
registry.registerPredicateConfigProducer(NoDiskConflictPred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, volumerestrictions.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.NoVolumeZoneConflictPred,
|
||||
registry.registerPredicateConfigProducer(NoVolumeZoneConflictPred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, volumezone.Name, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.MaxCSIVolumeCountPred,
|
||||
registry.registerPredicateConfigProducer(MaxCSIVolumeCountPred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.CSIName, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.MaxEBSVolumeCountPred,
|
||||
registry.registerPredicateConfigProducer(MaxEBSVolumeCountPred,
|
||||
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
|
||||
plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.EBSName, nil)
|
||||
return
|
||||
})
|
||||
registry.registerPredicateConfigProducer(predicates.MaxGCEPDVolumeCountPred,
|
||||
registry.registerPredicateConfigProducer(MaxGCEPDVolumeCountPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.GCEPDName, nil)
return
})
registry.registerPredicateConfigProducer(predicates.MaxAzureDiskVolumeCountPred,
registry.registerPredicateConfigProducer(MaxAzureDiskVolumeCountPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.AzureDiskName, nil)
return
})
registry.registerPredicateConfigProducer(predicates.MaxCinderVolumeCountPred,
registry.registerPredicateConfigProducer(MaxCinderVolumeCountPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodevolumelimits.CinderName, nil)
return
})
registry.registerPredicateConfigProducer(predicates.MatchInterPodAffinityPred,
registry.registerPredicateConfigProducer(MatchInterPodAffinityPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, interpodaffinity.Name, nil)
plugins.PreFilter = appendToPluginSet(plugins.PreFilter, interpodaffinity.Name, nil)
return
})
registry.registerPredicateConfigProducer(predicates.CheckNodeLabelPresencePred,
registry.registerPredicateConfigProducer(CheckNodeLabelPresencePred,
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, nodelabel.Name, nil)
pluginConfig = append(pluginConfig, makePluginConfig(nodelabel.Name, args.NodeLabelArgs))
return
})
registry.registerPredicateConfigProducer(predicates.CheckServiceAffinityPred,
registry.registerPredicateConfigProducer(CheckServiceAffinityPred,
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.Filter = appendToPluginSet(plugins.Filter, serviceaffinity.Name, nil)
pluginConfig = append(pluginConfig, makePluginConfig(serviceaffinity.Name, args.ServiceAffinityArgs))
@ -351,13 +410,13 @@ func NewLegacyRegistry() *LegacyRegistry {
if utilfeature.DefaultFeatureGate.Enabled(features.EvenPodsSpread) {
klog.Infof("Registering EvenPodsSpread predicate and priority function")

registry.registerPredicateConfigProducer(predicates.EvenPodsSpreadPred,
registry.registerPredicateConfigProducer(EvenPodsSpreadPred,
func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
plugins.PreFilter = appendToPluginSet(plugins.PreFilter, podtopologyspread.Name, nil)
plugins.Filter = appendToPluginSet(plugins.Filter, podtopologyspread.Name, nil)
return
})
registry.DefaultPredicates.Insert(predicates.EvenPodsSpreadPred)
registry.DefaultPredicates.Insert(EvenPodsSpreadPred)

registry.registerPriorityConfigProducer(EvenPodsSpreadPriority,
func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
@ -433,7 +492,7 @@ func (lr *LegacyRegistry) ProcessPredicatePolicy(policy config.PredicatePolicy,
predicateName := policy.Name
if policy.Name == "PodFitsPorts" {
// For compatibility reasons, "PodFitsPorts" as a key is still supported.
predicateName = predicates.PodFitsHostPortsPred
predicateName = PodFitsHostPortsPred
}

if _, ok := lr.PredicateToConfigProducer[predicateName]; ok {
@ -458,7 +517,7 @@ func (lr *LegacyRegistry) ProcessPredicatePolicy(policy config.PredicatePolicy,
// We use the ServiceAffinity predicate name for all ServiceAffinity custom predicates.
// It may get called multiple times but we essentially only register one instance of ServiceAffinity predicate.
// This name is then used to find the registered plugin and run the plugin instead of the predicate.
predicateName = predicates.CheckServiceAffinityPred
predicateName = CheckServiceAffinityPred
}

if policy.Argument.LabelsPresence != nil {
@ -475,7 +534,7 @@ func (lr *LegacyRegistry) ProcessPredicatePolicy(policy config.PredicatePolicy,
// We use the CheckNodeLabelPresencePred predicate name for all kNodeLabel custom predicates.
// It may get called multiple times but we essentially only register one instance of NodeLabel predicate.
// This name is then used to find the registered plugin and run the plugin instead of the predicate.
predicateName = predicates.CheckNodeLabelPresencePred
predicateName = CheckNodeLabelPresencePred

}
return predicateName
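The registrations above all follow one shape: a legacy predicate key is mapped to a function that appends the replacing framework plugin(s) to the Plugins config. The following is only an illustrative sketch of that pattern with hypothetical, simplified stand-in types (not the scheduler's real ConfigProducerArgs or config.Plugins), shown here to make the mapping easier to read:

package main

import "fmt"

// Hypothetical stand-ins for the scheduler's config types.
type PluginSet struct{ Enabled []string }
type Plugins struct{ PreFilter, Filter PluginSet }

// configProducer mirrors the shape used above: emit the plugins that replace a legacy predicate.
type configProducer func() Plugins

var producers = map[string]configProducer{}

// registerPredicateConfigProducer is a sketch of the registration helper.
func registerPredicateConfigProducer(predicateName string, p configProducer) {
	producers[predicateName] = p
}

func main() {
	// An InterPodAffinity-style registration: one predicate maps to both a
	// PreFilter and a Filter plugin.
	registerPredicateConfigProducer("MatchInterPodAffinity", func() Plugins {
		var plugins Plugins
		plugins.PreFilter.Enabled = append(plugins.PreFilter.Enabled, "InterPodAffinity")
		plugins.Filter.Enabled = append(plugins.Filter.Enabled, "InterPodAffinity")
		return plugins
	})
	fmt.Println(producers["MatchInterPodAffinity"]())
}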
@ -1,37 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
srcs = ["utils.go"],
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

go_test(
name = "go_default_test",
srcs = ["utils_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
],
)
@ -1,88 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package migration

import (
"k8s.io/klog"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

const (
// PrioritiesStateKey is the key in CycleState to PrioritiesStateData
PrioritiesStateKey = "priorities"
)

// PredicateResultToFrameworkStatus converts a predicate result (PredicateFailureReason + error)
// to a framework status.
func PredicateResultToFrameworkStatus(reasons []predicates.PredicateFailureReason, err error) *framework.Status {
if s := ErrorToFrameworkStatus(err); s != nil {
return s
}

if len(reasons) == 0 {
return nil
}

code := framework.Unschedulable
if predicates.UnresolvablePredicateExists(reasons) {
code = framework.UnschedulableAndUnresolvable
}

// We will keep all failure reasons.
var failureReasons []string
for _, reason := range reasons {
failureReasons = append(failureReasons, reason.GetReason())
}
return framework.NewStatus(code, failureReasons...)
}

// ErrorToFrameworkStatus converts an error to a framework status.
func ErrorToFrameworkStatus(err error) *framework.Status {
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
}
return nil
}

// PrioritiesStateData is a pointer to PrioritiesMetadata.
type PrioritiesStateData struct {
Reference interface{}
}

// Clone is supposed to make a copy of the data, but since this is just a pointer, we are practically
// just copying the pointer.
func (p *PrioritiesStateData) Clone() framework.StateData {
return &PrioritiesStateData{
Reference: p.Reference,
}
}

// PriorityMetadata returns priority metadata stored in CycleState.
func PriorityMetadata(state *framework.CycleState) interface{} {
if state == nil {
return nil
}

var meta interface{}
if s, err := state.Read(PrioritiesStateKey); err == nil {
meta = s.(*PrioritiesStateData).Reference
} else {
klog.Errorf("reading key %q from CycleState, continuing without metadata: %v", PrioritiesStateKey, err)
}
return meta
}
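The helper deleted above encodes a simple rule: an error becomes an Error status; otherwise a non-empty reason list becomes Unschedulable, upgraded to UnschedulableAndUnresolvable if any reason is unresolvable. A self-contained sketch of that mapping, using hypothetical stand-in types rather than the scheduler's own framework package, purely for illustration:

package main

import "fmt"

type Code int

const (
	Error Code = iota
	Unschedulable
	UnschedulableAndUnresolvable
)

type Status struct {
	Code    Code
	Reasons []string
}

// toStatus mirrors the rule implemented by the deleted PredicateResultToFrameworkStatus.
func toStatus(reasons []string, unresolvable bool, err error) *Status {
	if err != nil {
		return &Status{Code: Error, Reasons: []string{err.Error()}}
	}
	if len(reasons) == 0 {
		return nil // nil status means the pod fits
	}
	code := Unschedulable
	if unresolvable {
		code = UnschedulableAndUnresolvable
	}
	return &Status{Code: code, Reasons: reasons}
}

func main() {
	fmt.Println(toStatus([]string{"node(s) didn't match node selector"}, true, nil))
}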
@ -1,68 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package migration

import (
"errors"
"reflect"
"testing"

"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func TestPredicateResultToFrameworkStatus(t *testing.T) {
tests := []struct {
name string
err error
reasons []predicates.PredicateFailureReason
wantStatus *framework.Status
}{
{
name: "Success",
},
{
name: "Error",
err: errors.New("Failed with error"),
wantStatus: framework.NewStatus(framework.Error, "Failed with error"),
},
{
name: "Error with reason",
err: errors.New("Failed with error"),
reasons: []predicates.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
wantStatus: framework.NewStatus(framework.Error, "Failed with error"),
},
{
name: "Unschedulable",
reasons: []predicates.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts},
wantStatus: framework.NewStatus(framework.Unschedulable, "node(s) didn't have free ports for the requested pod ports"),
},
{
name: "Unschedulable and Unresolvable",
reasons: []predicates.PredicateFailureReason{predicates.ErrPodNotFitsHostPorts, predicates.ErrNodeSelectorNotMatch},
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "node(s) didn't have free ports for the requested pod ports", "node(s) didn't match node selector"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
gotStatus := PredicateResultToFrameworkStatus(tt.reasons, tt.err)
if !reflect.DeepEqual(tt.wantStatus, gotStatus) {
t.Errorf("Got status %v, want %v", gotStatus, tt.wantStatus)
}
})
}
}
@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
@ -37,7 +36,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/apis/core:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@ -38,8 +37,13 @@ type NodeAffinity struct {
var _ framework.FilterPlugin = &NodeAffinity{}
var _ framework.ScorePlugin = &NodeAffinity{}

// Name is the name of the plugin used in the plugin registry and configurations.
const Name = "NodeAffinity"
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = "NodeAffinity"

// ErrReason for node affinity/selector not matching.
ErrReason = "node(s) didn't match node selector"
)

// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodeAffinity) Name() string {
@ -53,7 +57,7 @@ func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState,
return framework.NewStatus(framework.Error, "node not found")
}
if !pluginhelper.PodMatchesNodeSelectorAndAffinityTerms(pod, node) {
return framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason())
return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason)
}
return nil
}
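The change above replaces the predicates package's error value with a plugin-local ErrReason string, so the Filter result no longer needs the predicates import. A minimal, hypothetical sketch of that pattern (stand-in types only, not the framework's real Status API):

package main

import "fmt"

// Assumed stand-in for a framework status code.
const UnschedulableAndUnresolvable = "UnschedulableAndUnresolvable"

// ErrReason mirrors the plugin-local constant introduced above.
const ErrReason = "node(s) didn't match node selector"

// filter returns a code and reason, reporting the plugin's own reason string
// instead of predicates.ErrNodeSelectorNotMatch.GetReason().
func filter(nodeLabels, selector map[string]string) (string, string) {
	for k, v := range selector {
		if nodeLabels[k] != v {
			return UnschedulableAndUnresolvable, ErrReason
		}
	}
	return "Success", ""
}

func main() {
	fmt.Println(filter(map[string]string{"zone": "a"}, map[string]string{"zone": "b"}))
}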
@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
@ -52,7 +51,7 @@ func TestNodeAffinity(t *testing.T) {
},
},
name: "missing labels",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -94,7 +93,7 @@ func TestNodeAffinity(t *testing.T) {
"foo": "bar",
},
name: "node labels are subset",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -230,7 +229,7 @@ func TestNodeAffinity(t *testing.T) {
"foo": "bar",
},
name: "Pod with affinity that don't match node's labels won't schedule onto the node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -248,7 +247,7 @@ func TestNodeAffinity(t *testing.T) {
"foo": "bar",
},
name: "Pod with a nil []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -266,7 +265,7 @@ func TestNodeAffinity(t *testing.T) {
"foo": "bar",
},
name: "Pod with an empty []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -288,7 +287,7 @@ func TestNodeAffinity(t *testing.T) {
"foo": "bar",
},
name: "Pod with empty MatchExpressions is not a valid value will match no objects and won't schedule onto the node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{},
@ -371,7 +370,7 @@ func TestNodeAffinity(t *testing.T) {
"GPU": "NVIDIA-GRID-K1",
},
name: "Pod with multiple matchExpressions ANDed that doesn't match the existing node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -468,7 +467,7 @@ func TestNodeAffinity(t *testing.T) {
},
name: "Pod with an Affinity matches node's labels but the PodSpec.NodeSelector(the old thing that we are deprecating) " +
"is not satisfied, won't schedule onto the node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -496,7 +495,7 @@ func TestNodeAffinity(t *testing.T) {
"foo": "bar",
},
name: "Pod with an invalid value in Affinity term won't be scheduled onto the node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -547,7 +546,7 @@ func TestNodeAffinity(t *testing.T) {
},
nodeName: "node_2",
name: "Pod with matchFields using In operator that does not match the existing node",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -616,7 +615,7 @@ func TestNodeAffinity(t *testing.T) {
nodeName: "node_2",
labels: map[string]string{"foo": "bar"},
name: "Pod with one term: matchFields does not match, but matchExpressions matches",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
{
pod: &v1.Pod{
@ -685,7 +684,7 @@ func TestNodeAffinity(t *testing.T) {
nodeName: "node_2",
labels: map[string]string{"foo": "bar"},
name: "Pod with two terms: both matchFields and matchExpressions do not match",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrNodeSelectorNotMatch.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
}
@ -6,8 +6,6 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
@ -34,7 +32,6 @@ go_test(
srcs = ["node_name_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
@ -21,8 +21,6 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -32,8 +30,13 @@ type NodeName struct{}

var _ framework.FilterPlugin = &NodeName{}

// Name is the name of the plugin used in the plugin registry and configurations.
const Name = "NodeName"
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = "NodeName"

// ErrReason returned when node name doesn't match.
ErrReason = "node(s) didn't match the requested hostname"
)

// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodeName) Name() string {
@ -42,8 +45,18 @@ func (pl *NodeName) Name() string {

// Filter invoked at the filter extension point.
func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
_, reasons, err := predicates.PodFitsHost(pod, nil, nodeInfo)
return migration.PredicateResultToFrameworkStatus(reasons, err)
if nodeInfo.Node() == nil {
return framework.NewStatus(framework.Error, "node not found")
}
if !Fits(pod, nodeInfo) {
return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason)
}
return nil
}

// Fits actually checks if the pod fits the node.
func Fits(pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) bool {
return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name
}

// New initializes a new plugin and returns it.
@ -23,7 +23,6 @@ import (

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -65,7 +64,7 @@ func TestNodeName(t *testing.T) {
},
},
name: "host doesn't match",
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrPodNotMatchHostName.GetReason()),
wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReason),
},
}

@ -6,14 +6,10 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)

@ -36,9 +32,9 @@ go_test(
srcs = ["node_ports_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
],
)

@ -22,13 +22,8 @@ import (

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"

"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

// NodePorts is a plugin that checks if a node has free ports for the requested pod ports.
@ -43,6 +38,9 @@ const (
// preFilterStateKey is the key in CycleState to NodePorts pre-computed data.
// Using the name of the plugin will likely help us avoid collisions with other plugins.
preFilterStateKey = "PreFilter" + Name

// ErrReason when node ports aren't available.
ErrReason = "node(s) didn't have free ports for the requested pod ports"
)

type preFilterState []*v1.ContainerPort
@ -58,9 +56,24 @@ func (pl *NodePorts) Name() string {
return Name
}

// getContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair
// will be in the result; but it does not resolve port conflict.
func getContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort {
ports := []*v1.ContainerPort{}
for _, pod := range pods {
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
for k := range container.Ports {
ports = append(ports, &container.Ports[k])
}
}
}
return ports
}

// PreFilter invoked at the prefilter extension point.
func (pl *NodePorts) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
s := schedutil.GetContainerPorts(pod)
s := getContainerPorts(pod)
cycleState.Write(preFilterStateKey, preFilterState(s))
return nil
}
@ -71,15 +84,10 @@ func (pl *NodePorts) PreFilterExtensions() framework.PreFilterExtensions {
}

func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error) {
if cycleState == nil {
return nil, fmt.Errorf("invalid nil CycleState")
}

c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// The metadata wasn't pre-computed in prefilter. We ignore the error for now since
// preFilterState state doesn't exist. We ignore the error for now since
// Filter is able to handle that by computing it again.
klog.V(5).Infof("Error reading %q from cycleState: %v", preFilterStateKey, err)
return nil, nil
}

@ -92,12 +100,38 @@ func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error)

// Filter invoked at the filter extension point.
func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
wantPorts, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
}
_, reasons, err := predicates.PodFitsHostPortsPredicate(pod, state, nodeInfo)
return migration.PredicateResultToFrameworkStatus(reasons, err)

var fits bool
if wantPorts != nil {
fits = fitsPorts(wantPorts, nodeInfo)
} else {
fits = Fits(pod, nodeInfo)
}
if !fits {
return framework.NewStatus(framework.Unschedulable, ErrReason)
}

return nil
}

// Fits checks if the pod fits the node.
func Fits(pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) bool {
return fitsPorts(getContainerPorts(pod), nodeInfo)
}

func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *nodeinfo.NodeInfo) bool {
// try to see whether existingPorts and wantPorts will conflict or not
existingPorts := nodeInfo.UsedPorts()
for _, cp := range wantPorts {
if existingPorts.CheckConflict(cp.HostIP, string(cp.Protocol), cp.HostPort) {
return false
}
}
return true
}

// New initializes a new plugin and returns it.
@ -24,7 +24,7 @@ import (
"testing"

v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/apimachinery/pkg/util/diff"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -76,14 +76,14 @@ func TestNodePorts(t *testing.T) {
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")),
name: "same udp port",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")),
name: "same tcp port",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8080"),
@ -102,35 +102,35 @@ func TestNodePorts(t *testing.T) {
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")),
name: "second udp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")),
name: "first tcp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/0.0.0.0/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")),
name: "first tcp port conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")),
name: "TCP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")),
name: "second tcp port conflict to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "UDP/127.0.0.1/8001"),
@ -143,7 +143,7 @@ func TestNodePorts(t *testing.T) {
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")),
name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.ErrPodNotFitsHostPorts.GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
}

@ -162,3 +162,154 @@ func TestNodePorts(t *testing.T) {
})
}
}

func TestPreFilterDisabled(t *testing.T) {
tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
name string
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo(),
name: "nothing running",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/9090")),
name: "other port",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")),
name: "same udp port",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
p, _ := New(nil, nil)
cycleState := framework.NewCycleState()
gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}
})
}
}

func TestGetContainerPorts(t *testing.T) {
tests := []struct {
pod1 *v1.Pod
pod2 *v1.Pod
expected []*v1.ContainerPort
}{
{
pod1: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8001,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8002,
Protocol: v1.ProtocolTCP,
},
},
},
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8003,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8004,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
},
pod2: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8011,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8012,
Protocol: v1.ProtocolTCP,
},
},
},
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8013,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8014,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
},
expected: []*v1.ContainerPort{
{
ContainerPort: 8001,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8002,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8003,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8004,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8011,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8012,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8013,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8014,
Protocol: v1.ProtocolTCP,
},
},
},
}

for _, test := range tests {
result := getContainerPorts(test.pod1, test.pod2)
if !reflect.DeepEqual(test.expected, result) {
t.Errorf("Got different result than expected.\nDifference detected on:\n%s", diff.ObjectGoPrintSideBySide(test.expected, result))
}
}
}

@ -17,9 +17,7 @@ go_library(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/util:go_default_library",
@ -61,7 +59,6 @@ go_test(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/nodeinfo/snapshot:go_default_library",

@ -23,12 +23,12 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"

"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

var _ framework.PreFilterPlugin = &Fit{}
@ -57,7 +57,7 @@ type FitArgs struct {

// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
podResourceRequest *nodeinfo.Resource
schedulernodeinfo.Resource
}

// Clone the prefilter state.
@ -70,12 +70,55 @@ func (f *Fit) Name() string {
return FitName
}

// computePodResourceRequest returns a schedulernodeinfo.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously.
//
// If Pod Overhead is specified and the feature gate is set, the resources defined for Overhead
// are added to the calculated Resource request sum
//
// Example:
//
// Pod:
// InitContainers
// IC1:
// CPU: 2
// Memory: 1G
// IC2:
// CPU: 2
// Memory: 3G
// Containers
// C1:
// CPU: 2
// Memory: 1G
// C2:
// CPU: 1
// Memory: 1G
//
// Result: CPU: 3, Memory: 3G
func computePodResourceRequest(pod *v1.Pod) *preFilterState {
result := &preFilterState{}
for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Requests)
}

// take max_resource(sum_pod, any_init_container)
for _, container := range pod.Spec.InitContainers {
result.SetMaxResource(container.Resources.Requests)
}

// If Overhead is being utilized, add to the total requests for the pod
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
result.Add(pod.Spec.Overhead)
}

return result
}

// PreFilter invoked at the prefilter extension point.
func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
s := &preFilterState{
podResourceRequest: predicates.GetResourceRequest(pod),
}
cycleState.Write(preFilterStateKey, s)
cycleState.Write(preFilterStateKey, computePodResourceRequest(pod))
return nil
}

@ -84,10 +127,10 @@ func (f *Fit) PreFilterExtensions() framework.PreFilterExtensions {
return nil
}

func getPodResourceRequest(cycleState *framework.CycleState) (*nodeinfo.Resource, error) {
func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
c, err := cycleState.Read(preFilterStateKey)
if err != nil {
// The metadata wasn't pre-computed in prefilter. We ignore the error for now since
// The preFilterState doesn't exist. We ignore the error for now since
// Filter is able to handle that by computing it again.
klog.V(5).Infof("Error reading %q from cycleState: %v", preFilterStateKey, err)
return nil, nil
@ -97,17 +140,123 @@ func getPodResourceRequest(cycleState *framework.CycleState) (*nodeinfo.Resource
if !ok {
return nil, fmt.Errorf("%+v convert to NodeResourcesFit.preFilterState error", c)
}
return s.podResourceRequest, nil
return s, nil
}

// Filter invoked at the filter extension point.
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
r, err := getPodResourceRequest(cycleState)
// Checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
// It returns a list of insufficient resources, if empty, then the node has all the resources requested by the pod.
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
}
_, reasons, err := predicates.PodFitsResourcesPredicate(pod, r, f.ignoredResources, nodeInfo)
return migration.PredicateResultToFrameworkStatus(reasons, err)

var insufficientResources []InsufficientResource
if s != nil {
insufficientResources = fitsRequest(s, nodeInfo, f.ignoredResources)
} else {
insufficientResources = Fits(pod, nodeInfo, f.ignoredResources)
}

// We will keep all failure reasons.
var failureReasons []string
for _, r := range insufficientResources {
failureReasons = append(failureReasons, getErrReason(r.ResourceName))
}

if len(insufficientResources) != 0 {
return framework.NewStatus(framework.Unschedulable, failureReasons...)
}
return nil
}

func getErrReason(rn v1.ResourceName) string {
return fmt.Sprintf("Insufficient %v", rn)
}

// InsufficientResource describes what kind of resource limit is hit and caused the pod to not fit the node.
type InsufficientResource struct {
ResourceName v1.ResourceName
Requested int64
Used int64
Capacity int64
}

// Fits checks if node have enough resources to host the pod.
func Fits(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod), nodeInfo, ignoredExtendedResources)
}

func fitsRequest(podRequest *preFilterState, nodeInfo *schedulernodeinfo.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
var insufficientResources []InsufficientResource

allowedPodNumber := nodeInfo.AllowedPodNumber()
if len(nodeInfo.Pods())+1 > allowedPodNumber {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourcePods,
1,
int64(len(nodeInfo.Pods())),
int64(allowedPodNumber),
})
}

if ignoredExtendedResources == nil {
ignoredExtendedResources = sets.NewString()
}

if podRequest.MilliCPU == 0 &&
podRequest.Memory == 0 &&
podRequest.EphemeralStorage == 0 &&
len(podRequest.ScalarResources) == 0 {
return insufficientResources
}

allocatable := nodeInfo.AllocatableResource()
if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourceCPU,
podRequest.MilliCPU,
nodeInfo.RequestedResource().MilliCPU,
allocatable.MilliCPU,
})
}
if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourceMemory,
podRequest.Memory,
nodeInfo.RequestedResource().Memory,
allocatable.Memory,
})
}
if allocatable.EphemeralStorage < podRequest.EphemeralStorage+nodeInfo.RequestedResource().EphemeralStorage {
insufficientResources = append(insufficientResources, InsufficientResource{
v1.ResourceEphemeralStorage,
podRequest.EphemeralStorage,
nodeInfo.RequestedResource().EphemeralStorage,
allocatable.EphemeralStorage,
})
}

for rName, rQuant := range podRequest.ScalarResources {
if v1helper.IsExtendedResourceName(rName) {
// If this resource is one of the extended resources that should be
// ignored, we will skip checking it.
if ignoredExtendedResources.Has(string(rName)) {
continue
}
}
if allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] {
insufficientResources = append(insufficientResources, InsufficientResource{
rName,
podRequest.ScalarResources[rName],
nodeInfo.RequestedResource().ScalarResources[rName],
allocatable.ScalarResources[rName],
})
}
}

return insufficientResources
}

// NewFit initializes a new plugin and returns it.
@ -28,7 +28,6 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@ -88,16 +87,17 @@ func newResourceOverheadPod(pod *v1.Pod, overhead v1.ResourceList) *v1.Pod {
pod.Spec.Overhead = overhead
return pod
}
func TestNodeResourcesFit(t *testing.T) {

func TestEnoughRequests(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()

enoughPodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
name string
ignoredResources []byte
preFilterDisabled bool
wantStatus *framework.Status
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
name string
ignoredResources []byte
wantInsufficientResources []InsufficientResource
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
@ -109,52 +109,41 @@ func TestNodeResourcesFit(t *testing.T) {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
name: "too many resources fails",
wantStatus: framework.NewStatus(
framework.Unschedulable,
predicates.NewInsufficientResourceError(v1.ResourceCPU, 2, 10, 10).GetReason(),
predicates.NewInsufficientResourceError(v1.ResourceMemory, 2, 10, 10).GetReason(),
),
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
name: "without prefilter",
preFilterDisabled: true,
wantStatus: framework.NewStatus(
framework.Unschedulable,
predicates.NewInsufficientResourceError(v1.ResourceCPU, 2, 10, 10).GetReason(),
predicates.NewInsufficientResourceError(v1.ResourceMemory, 2, 10, 10).GetReason(),
),
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, 1, 10, 10}, {v1.ResourceMemory, 1, 20, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10).GetReason()),
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, 3, 8, 10}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10).GetReason()),
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, 3, 8, 10}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20).GetReason()),
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, 3, 19, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20).GetReason()),
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, 3, 19, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@ -178,15 +167,17 @@ func TestNodeResourcesFit(t *testing.T) {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10).GetReason()),
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, 2, 9, 10}},
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20).GetReason()),
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, 2, 19, 20}},
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
@ -215,32 +206,36 @@ func TestNodeResourcesFit(t *testing.T) {
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 10, 0, 5).GetReason()),
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, 10, 0, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 10, 0, 5).GetReason()),
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, 10, 0, 5}},
},
{
pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 1, 5, 5).GetReason()),
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, 1, 5, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 1, 5, 5).GetReason()),
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, 1, 5, 5}},
},
{
pod: newResourcePod(
@ -248,8 +243,9 @@ func TestNodeResourcesFit(t *testing.T) {
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 6, 2, 5).GetReason()),
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, 6, 2, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
@ -265,56 +261,63 @@ func TestNodeResourcesFit(t *testing.T) {
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 6, 2, 5).GetReason()),
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, 6, 2, 5}},
},
{
pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceB, 1, 0, 0).GetReason()),
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, 1, 0, 0}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceB, 1, 0, 0).GetReason()),
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, 1, 0, 0}},
},
{
pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0).GetReason()),
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, 10, 0, 0}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0).GetReason()),
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, 10, 0, 0}},
},
{
pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(hugePageResourceA, 10, 0, 5).GetReason()),
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, 10, 0, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
|
||||
name: "hugepages resource capacity enforced for init container",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(hugePageResourceA, 10, 0, 5).GetReason()),
|
||||
name: "hugepages resource capacity enforced for init container",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
|
||||
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, 10, 0, 5}},
|
||||
},
|
||||
{
|
||||
pod: newResourcePod(
|
||||
@ -322,14 +325,14 @@ func TestNodeResourcesFit(t *testing.T) {
|
||||
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
|
||||
nodeInfo: schedulernodeinfo.NewNodeInfo(
|
||||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
|
||||
name: "hugepages resource allocatable enforced for multiple containers",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(hugePageResourceA, 6, 2, 5).GetReason()),
|
||||
name: "hugepages resource allocatable enforced for multiple containers",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
|
||||
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, 6, 2, 5}},
|
||||
},
|
||||
{
|
||||
pod: newResourcePod(
|
||||
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
|
||||
nodeInfo: schedulernodeinfo.NewNodeInfo(
|
||||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
|
||||
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
|
||||
ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
|
||||
name: "skip checking ignored extended resource",
|
||||
},
|
||||
@ -338,19 +341,18 @@ func TestNodeResourcesFit(t *testing.T) {
|
||||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
|
||||
v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
|
||||
),
|
||||
nodeInfo: schedulernodeinfo.NewNodeInfo(
|
||||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
|
||||
name: "resources + pod overhead fits",
|
||||
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
|
||||
name: "resources + pod overhead fits",
|
||||
},
|
||||
{
|
||||
pod: newResourceOverheadPod(
|
||||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
|
||||
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
|
||||
),
|
||||
nodeInfo: schedulernodeinfo.NewNodeInfo(
|
||||
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
|
||||
name: "requests + overhead does not fit for memory",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 16, 5, 20).GetReason()),
|
||||
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
|
||||
name: "requests + overhead does not fit for memory",
|
||||
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
|
||||
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, 16, 5, 20}},
|
||||
},
|
||||
}
|
||||
|
||||
@ -362,20 +364,82 @@ func TestNodeResourcesFit(t *testing.T) {
|
||||
args := &runtime.Unknown{Raw: test.ignoredResources}
|
||||
p, _ := NewFit(args, nil)
|
||||
cycleState := framework.NewCycleState()
|
||||
if !test.preFilterDisabled {
|
||||
preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
|
||||
if !preFilterStatus.IsSuccess() {
|
||||
t.Errorf("prefilter failed with status: %v", preFilterStatus)
|
||||
}
|
||||
preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
|
||||
if !preFilterStatus.IsSuccess() {
|
||||
t.Errorf("prefilter failed with status: %v", preFilterStatus)
|
||||
}
|
||||
|
||||
gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
|
||||
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
|
||||
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
|
||||
}
|
||||
|
||||
gotInsufficientResources := Fits(test.pod, test.nodeInfo, p.(*Fit).ignoredResources)
|
||||
if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
|
||||
t.Errorf("insufficient resources do not match: %v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
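Taken together, the reworked loop above runs the Fit plugin through both of its extension points (PreFilter, then Filter) and also calls the standalone Fits helper directly. The "Insufficient ..." reasons it compares against come from a small test helper, getErrReason; judging from the expectations used later in scheduler_test.go (fmt.Sprintf("Insufficient %v", ...)), it is most likely just a formatter along these lines. A minimal sketch, not the repository's actual code:

package main

import "fmt"

// getErrReason is assumed to build the same human-readable reason string the
// NodeResourcesFit plugin attaches to an Unschedulable status.
func getErrReason(resourceName string) string {
	return fmt.Sprintf("Insufficient %v", resourceName)
}

func main() {
	// e.g. "Insufficient cpu", matching the wantStatus expectations above
	fmt.Println(getErrReason("cpu"))
}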

func TestPreFilterDisabled(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()

tests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
name string
ignoredResources []byte
wantInsufficientResources []InsufficientResource
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
name: "no resources requested always fits",
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, 1, 10, 10}, {v1.ResourceMemory, 1, 20, 20}},
},
{
pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
name: "skip checking ignored extended resource",
},
}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
test.nodeInfo.SetNode(&node)

args := &runtime.Unknown{Raw: test.ignoredResources}
p, _ := NewFit(args, nil)
cycleState := framework.NewCycleState()

gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, test.pod, test.nodeInfo)
if !reflect.DeepEqual(gotStatus, test.wantStatus) {
t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
}

gotInsufficientResources := Fits(test.pod, test.nodeInfo, p.(*Fit).ignoredResources)
if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
t.Errorf("insufficient resources do not match: %v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
}
})
}
}
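The wantInsufficientResources literals in these tables are positional, e.g. {extendedResourceA, 1, 5, 5}. From the values alone the struct appears to pair a resource name with the requested, used, and capacity quantities; the stand-in below only illustrates that shape and is not the plugin's real definition, whose field names may well differ.

package main

import "fmt"

// insufficientResourceSketch mirrors the apparent field order used by the test
// literals above: resource name, requested, used, capacity (all assumptions).
type insufficientResourceSketch struct {
	ResourceName string
	Requested    int64
	Used         int64
	Capacity     int64
}

func main() {
	r := insufficientResourceSketch{"example.com/aaa", 1, 5, 5}
	fmt.Printf("Insufficient %s: requested %d, node used %d of %d\n",
		r.ResourceName, r.Requested, r.Used, r.Capacity)
}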

func TestNotEnoughRequests(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
notEnoughPodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
@ -384,32 +448,28 @@ func TestNodeResourcesFit(t *testing.T) {
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
name: "even without specified resources predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourcePods)),
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
name: "even if both resources fit predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourcePods)),
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourcePods)),
},
{
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourcePods)),
},
}
for _, test := range notEnoughPodsTests {
@ -431,6 +491,11 @@ func TestNodeResourcesFit(t *testing.T) {
})
}

}

func TestStorageRequests(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()

storagePodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo
@ -442,7 +507,7 @@ func TestNodeResourcesFit(t *testing.T) {
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
name: "due to container scratch disk",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10).GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
},
{
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
@ -455,7 +520,7 @@ func TestNodeResourcesFit(t *testing.T) {
nodeInfo: schedulernodeinfo.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
name: "storage ephemeral local storage request exceeds allocatable",
wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20).GetReason()),
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
},
{
pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 10}),

@ -39,7 +39,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",

@ -34,7 +34,6 @@ import (
csitrans "k8s.io/csi-translation-lib"
csilibplugins "k8s.io/csi-translation-lib/plugins"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@ -239,16 +238,15 @@ func TestCSILimits(t *testing.T) {
}

tests := []struct {
newPod *v1.Pod
existingPods []*v1.Pod
filterName string
maxVols int
driverNames []string
test string
migrationEnabled bool
limitSource string
expectedFailureReason *predicates.PredicateFailureError
wantStatus *framework.Status
newPod *v1.Pod
existingPods []*v1.Pod
filterName string
maxVols int
driverNames []string
test string
migrationEnabled bool
limitSource string
wantStatus *framework.Status
}{
{
newPod: csiEBSOneVolPod,

@ -44,7 +44,6 @@ import (
"k8s.io/client-go/tools/events"
volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/core"
frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
@ -150,10 +149,6 @@ func (es mockScheduler) Schedule(ctx context.Context, state *framework.CycleStat
return es.result, es.err
}

func (es mockScheduler) Predicates() map[string]predicates.FitPredicate {
return nil
}

func (es mockScheduler) Extenders() []algorithm.SchedulerExtender {
return nil
}
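Dropping mockScheduler.Predicates() implies the scheduling-algorithm interface no longer exposes a predicates map at all, so a test double only needs its remaining methods such as Schedule and Extenders. The toy interface below is an assumption for illustration, not the real core.ScheduleAlgorithm definition:

package main

import (
	"context"
	"fmt"
)

// algorithmLike is a hypothetical, trimmed-down stand-in for the interface a
// mock scheduler has to satisfy once Predicates() is gone.
type algorithmLike interface {
	Schedule(ctx context.Context, pod string) (node string, err error)
	Extenders() []string
}

// fixedScheduler always returns the same result, mirroring the test's
// mockScheduler pattern of canned (result, err) pairs.
type fixedScheduler struct {
	node string
	err  error
}

func (s fixedScheduler) Schedule(ctx context.Context, pod string) (string, error) {
	return s.node, s.err
}

func (s fixedScheduler) Extenders() []string { return nil }

func main() {
	var alg algorithmLike = fixedScheduler{node: "node-1"}
	node, err := alg.Schedule(context.Background(), "pod-a")
	fmt.Println(node, err)
}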
@ -360,7 +355,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
client := clientsetfake.NewSimpleClientset(&node)
informerFactory := informers.NewSharedInformerFactory(client, 0)

f := st.RegisterFilterPlugin("PodFitsHostPorts", nodeports.New)
f := st.RegisterPluginAsExtensions(nodeports.Name, 1, nodeports.New, "Filter", "PreFilter")
scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, f, pod, &node)

waitPodExpireChan := make(chan struct{})
@ -423,7 +418,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
scache.AddNode(&node)
client := clientsetfake.NewSimpleClientset(&node)
informerFactory := informers.NewSharedInformerFactory(client, 0)
f := st.RegisterFilterPlugin(nodeports.Name, nodeports.New)
f := st.RegisterPluginAsExtensions(nodeports.Name, 1, nodeports.New, "Filter", "PreFilter")
scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, informerFactory, stop, f, firstPod, &node)

// We use conflicted pod ports to incur fit predicate failure.
@ -441,7 +436,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
FilteredNodesStatuses: framework.NodeToStatusMap{
node.Name: framework.NewStatus(
framework.Unschedulable,
predicates.ErrPodNotFitsHostPorts.GetReason(),
nodeports.ErrReason,
),
},
}
@ -517,7 +512,7 @@ func TestSchedulerErrorWithLongBinding(t *testing.T) {

client := clientsetfake.NewSimpleClientset(&node)
informerFactory := informers.NewSharedInformerFactory(client, 0)
f := st.RegisterFilterPlugin(nodeports.Name, nodeports.New)
f := st.RegisterPluginAsExtensions(nodeports.Name, 1, nodeports.New, "Filter", "PreFilter")

scheduler, bindingChan := setupTestSchedulerLongBindingWithRetry(
queuedPodStore, scache, informerFactory, f, stop, test.BindingDuration)
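Each of these call sites swaps st.RegisterFilterPlugin for st.RegisterPluginAsExtensions so the NodePorts plugin is enabled at both PreFilter and Filter; its Filter step now relies on state that PreFilter computes. The helper below only sketches that idea with made-up types, not the real testing package:

package main

import "fmt"

// fakeRegistry records which extension points each plugin was enabled at; a
// stand-in for the registry/plugin config the st helpers actually mutate.
type fakeRegistry struct {
	enabled map[string][]string
}

// registerPluginAsExtensions mimics the spirit of st.RegisterPluginAsExtensions:
// one plugin name, several extension points.
func (r *fakeRegistry) registerPluginAsExtensions(name string, extensionPoints ...string) {
	if r.enabled == nil {
		r.enabled = map[string][]string{}
	}
	r.enabled[name] = append(r.enabled[name], extensionPoints...)
}

func main() {
	r := &fakeRegistry{}
	r.registerPluginAsExtensions("NodePorts", "Filter", "PreFilter")
	fmt.Println(r.enabled["NodePorts"]) // [Filter PreFilter]
}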
@ -630,8 +625,8 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
for _, node := range nodes {
failedNodeStatues[node.Name] = framework.NewStatus(
framework.Unschedulable,
predicates.NewInsufficientResourceError(v1.ResourceCPU, 4000, 0, 2000).GetReason(),
predicates.NewInsufficientResourceError(v1.ResourceMemory, 500, 0, 100).GetReason(),
fmt.Sprintf("Insufficient %v", v1.ResourceCPU),
fmt.Sprintf("Insufficient %v", v1.ResourceMemory),
)
}
f := st.RegisterFilterPlugin("PodFitsResources", noderesources.NewFit)
@ -665,7 +660,8 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, f st.RegisterPluginFunc, recorder events.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
registry := framework.Registry{}
plugins := &schedulerapi.Plugins{
Filter: &schedulerapi.PluginSet{},
PreFilter: &schedulerapi.PluginSet{},
Filter: &schedulerapi.PluginSet{},
}
var pluginConfigs []schedulerapi.PluginConfig
f(&registry, plugins, pluginConfigs)
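The extra PreFilter: &schedulerapi.PluginSet{} entry matters because the registration helper appends enabled plugins into these sets; without an allocated PreFilter set there would be nothing to append to. A rough illustration with simplified types (the field types in the real schedulerapi package differ):

package main

import "fmt"

// pluginSet and pluginsConfig loosely imitate schedulerapi.PluginSet and
// schedulerapi.Plugins; they exist only to show why both sets are allocated.
type pluginSet struct{ Enabled []string }

type pluginsConfig struct {
	PreFilter *pluginSet
	Filter    *pluginSet
}

func enable(p *pluginsConfig, name string) {
	// Mirrors the registration helpers: append into each pre-allocated set.
	p.PreFilter.Enabled = append(p.PreFilter.Enabled, name)
	p.Filter.Enabled = append(p.Filter.Enabled, name)
}

func main() {
	p := &pluginsConfig{PreFilter: &pluginSet{}, Filter: &pluginSet{}}
	enable(p, "NodePorts")
	fmt.Println(p.PreFilter.Enabled, p.Filter.Enabled)
}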
@ -718,7 +714,8 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, f st.RegisterPluginFunc, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
registry := framework.Registry{}
plugins := &schedulerapi.Plugins{
Filter: &schedulerapi.PluginSet{},
PreFilter: &schedulerapi.PluginSet{},
Filter: &schedulerapi.PluginSet{},
}
var pluginConfigs []schedulerapi.PluginConfig
f(&registry, plugins, pluginConfigs)

@ -22,7 +22,6 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],

@ -26,21 +26,6 @@ import (
extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
)

// GetContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair
// will be in the result; but it does not resolve port conflict.
func GetContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort {
var ports []*v1.ContainerPort
for _, pod := range pods {
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
for k := range container.Ports {
ports = append(ports, &container.Ports[k])
}
}
}
return ports
}
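GetContainerPorts is removed from the shared util package in this commit (presumably its logic now lives with the node-ports plugin instead). The traversal is simple enough to restate; the snippet below reproduces it with plain stand-in types rather than the k8s.io/api ones, purely as a reading aid:

package main

import "fmt"

// Simplified stand-ins for v1.ContainerPort, v1.Container and v1.Pod.
type containerPort struct{ ContainerPort int32 }
type container struct{ Ports []containerPort }
type pod struct{ Containers []container }

// collectPorts mirrors the removed GetContainerPorts: it gathers a pointer to
// every declared container port across the given pods, without de-duplicating
// or resolving conflicts.
func collectPorts(pods ...pod) []*containerPort {
	var ports []*containerPort
	for i := range pods {
		for j := range pods[i].Containers {
			c := &pods[i].Containers[j]
			for k := range c.Ports {
				ports = append(ports, &c.Ports[k])
			}
		}
	}
	return ports
}

func main() {
	p := pod{Containers: []container{{Ports: []containerPort{{8001}, {8002}}}}}
	for _, cp := range collectPorts(p) {
		fmt.Println(cp.ContainerPort)
	}
}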

// GetPodFullName returns a name that uniquely identifies a pod.
func GetPodFullName(pod *v1.Pod) string {
// Use underscore as the delimiter because it is not allowed in pod name

@ -18,131 +18,14 @@ package util

import (
"fmt"
"reflect"
"testing"
"time"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
)

// TestSortableList tests SortableList by storing pods in the list and sorting
// them by their priority.

func TestGetContainerPorts(t *testing.T) {
tests := []struct {
pod1 *v1.Pod
pod2 *v1.Pod
expected []*v1.ContainerPort
}{
{
pod1: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8001,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8002,
Protocol: v1.ProtocolTCP,
},
},
},
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8003,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8004,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
},
pod2: &v1.Pod{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8011,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8012,
Protocol: v1.ProtocolTCP,
},
},
},
{
Ports: []v1.ContainerPort{
{
ContainerPort: 8013,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8014,
Protocol: v1.ProtocolTCP,
},
},
},
},
},
},
expected: []*v1.ContainerPort{
{
ContainerPort: 8001,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8002,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8003,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8004,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8011,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8012,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8013,
Protocol: v1.ProtocolTCP,
},
{
ContainerPort: 8014,
Protocol: v1.ProtocolTCP,
},
},
},
}

for _, test := range tests {
result := GetContainerPorts(test.pod1, test.pod2)
if !reflect.DeepEqual(test.expected, result) {
t.Errorf("Got different result than expected.\nDifference detected on:\n%s", diff.ObjectGoPrintSideBySide(test.expected, result))
}
}
}

func TestGetPodFullName(t *testing.T) {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{