remove scheduler ServiceAffinity plugin

Signed-off-by: kerthcet <kerthcet@gmail.com>

parent 80056f73a6
commit fc9533e72f
@@ -377,7 +377,6 @@ API rule violation: list_type_missing,k8s.io/kube-scheduler/config/v1,Policy,Pre
API rule violation: list_type_missing,k8s.io/kube-scheduler/config/v1,Policy,Priorities
API rule violation: list_type_missing,k8s.io/kube-scheduler/config/v1,RequestedToCapacityRatioArguments,Resources
API rule violation: list_type_missing,k8s.io/kube-scheduler/config/v1,RequestedToCapacityRatioArguments,Shape
API rule violation: list_type_missing,k8s.io/kube-scheduler/config/v1,ServiceAffinity,Labels
API rule violation: list_type_missing,k8s.io/kubelet/config/v1alpha1,CredentialProvider,Args
API rule violation: list_type_missing,k8s.io/kubelet/config/v1alpha1,CredentialProvider,Env
API rule violation: list_type_missing,k8s.io/kubelet/config/v1alpha1,CredentialProvider,MatchImages

@@ -74,9 +74,6 @@ type PriorityPolicy struct {
// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration.
// Only one of its members may be specified
type PredicateArgument struct {
    // The predicate that provides affinity for pods belonging to a service
    // It uses a label to identify nodes that belong to the same "group"
    ServiceAffinity *ServiceAffinity
    // The predicate that checks whether a particular node has a certain label
    // defined or not, regardless of value
    LabelsPresence *LabelsPresence

@@ -85,9 +82,6 @@ type PredicateArgument struct {
// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration.
// Only one of its members may be specified
type PriorityArgument struct {
    // The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
    // It uses a label to identify nodes that belong to the same "group"
    ServiceAntiAffinity *ServiceAntiAffinity
    // The priority function that checks whether a particular node has a certain label
    // defined or not, regardless of value
    LabelPreference *LabelPreference

@@ -95,13 +89,6 @@ type PriorityArgument struct {
    RequestedToCapacityRatioArguments *RequestedToCapacityRatioArguments
}

// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type ServiceAffinity struct {
    // The list of labels that identify node "groups"
    // All of the labels should match for the node to be considered a fit for hosting the pod
    Labels []string
}

// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type LabelsPresence struct {
    // The list of labels that identify node "groups"

@@ -111,12 +98,6 @@ type LabelsPresence struct {
    Presence bool
}

// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct {
    // Used to identify node "groups"
    Label string
}

// LabelPreference holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct {
    // Used to identify node "groups"

@@ -45,7 +45,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
    &NodeResourcesFitArgs{},
    &PodTopologySpreadArgs{},
    &RequestedToCapacityRatioArgs{},
    &ServiceAffinityArgs{},
    &VolumeBindingArgs{},
    &NodeResourcesLeastAllocatedArgs{},
    &NodeResourcesMostAllocatedArgs{},

@@ -198,25 +198,6 @@ type ResourceSpec struct {

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceAffinityArgs holds arguments used to configure the ServiceAffinity
// plugin.
//
// This plugin has been deprecated and is only configurable through the
// scheduler policy API and the v1beta1 component config API. It is recommended
// to use the InterPodAffinity plugin instead.
type ServiceAffinityArgs struct {
    metav1.TypeMeta

    // AffinityLabels are homogeneous for pods that are scheduled to a node.
    // (i.e. it returns true IFF this pod can be added to this node such that all other pods in
    // the same service are running on nodes with the exact same values for Labels).
    AffinityLabels []string
    // AntiAffinityLabelsPreference are the labels to consider for service anti affinity scoring.
    AntiAffinityLabelsPreference []string
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin.
type VolumeBindingArgs struct {
    metav1.TypeMeta
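[Editor's note] The deprecation comment above points users at the InterPodAffinity plugin. As a rough, hedged illustration (not part of this commit), a workload that relied on ServiceAffinity over a single node label can usually express the same intent with pod affinity on a topology key. buildAffinity below is a hypothetical helper, and the topology key is an assumption:

package main

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildAffinity (hypothetical) approximates ServiceAffinityArgs{AffinityLabels: [topologyKey]}:
// require new pods to land in the same topology domain as running pods that
// match the service's selector.
func buildAffinity(serviceSelector map[string]string, topologyKey string) *v1.Affinity {
    return &v1.Affinity{
        PodAffinity: &v1.PodAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
                LabelSelector: &metav1.LabelSelector{MatchLabels: serviceSelector},
                TopologyKey:   topologyKey, // e.g. "topology.kubernetes.io/zone"
            }},
        },
    }
}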
@@ -147,26 +147,6 @@ func RegisterConversions(s *runtime.Scheme) error {
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1.ServiceAffinity)(nil), (*config.ServiceAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1_ServiceAffinity_To_config_ServiceAffinity(a.(*v1.ServiceAffinity), b.(*config.ServiceAffinity), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*config.ServiceAffinity)(nil), (*v1.ServiceAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_config_ServiceAffinity_To_v1_ServiceAffinity(a.(*config.ServiceAffinity), b.(*v1.ServiceAffinity), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1.ServiceAntiAffinity)(nil), (*config.ServiceAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(a.(*v1.ServiceAntiAffinity), b.(*config.ServiceAntiAffinity), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*config.ServiceAntiAffinity)(nil), (*v1.ServiceAntiAffinity)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(a.(*config.ServiceAntiAffinity), b.(*v1.ServiceAntiAffinity), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*v1.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope)
    }); err != nil {
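[Editor's note] For context on what the now-removed registrations above did: once a type pair is registered on a runtime.Scheme, conversions are looked up generically rather than called directly. A minimal sketch, assuming a scheme populated by RegisterConversions; not taken from this commit:

scheme := runtime.NewScheme()
if err := RegisterConversions(scheme); err != nil {
    panic(err)
}
in := &v1.Policy{}
out := &config.Policy{}
// Scheme.Convert dispatches to the function registered via
// AddGeneratedConversionFunc for the (*v1.Policy, *config.Policy) pair.
if err := scheme.Convert(in, out, nil); err != nil {
    panic(err)
}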
@@ -339,7 +319,6 @@ func Convert_config_Policy_To_v1_Policy(in *config.Policy, out *v1.Policy, s con
}

func autoConvert_v1_PredicateArgument_To_config_PredicateArgument(in *v1.PredicateArgument, out *config.PredicateArgument, s conversion.Scope) error {
    out.ServiceAffinity = (*config.ServiceAffinity)(unsafe.Pointer(in.ServiceAffinity))
    out.LabelsPresence = (*config.LabelsPresence)(unsafe.Pointer(in.LabelsPresence))
    return nil
}

@@ -350,7 +329,6 @@ func Convert_v1_PredicateArgument_To_config_PredicateArgument(in *v1.PredicateAr
}

func autoConvert_config_PredicateArgument_To_v1_PredicateArgument(in *config.PredicateArgument, out *v1.PredicateArgument, s conversion.Scope) error {
    out.ServiceAffinity = (*v1.ServiceAffinity)(unsafe.Pointer(in.ServiceAffinity))
    out.LabelsPresence = (*v1.LabelsPresence)(unsafe.Pointer(in.LabelsPresence))
    return nil
}

@@ -383,7 +361,6 @@ func Convert_config_PredicatePolicy_To_v1_PredicatePolicy(in *config.PredicatePo
}

func autoConvert_v1_PriorityArgument_To_config_PriorityArgument(in *v1.PriorityArgument, out *config.PriorityArgument, s conversion.Scope) error {
    out.ServiceAntiAffinity = (*config.ServiceAntiAffinity)(unsafe.Pointer(in.ServiceAntiAffinity))
    out.LabelPreference = (*config.LabelPreference)(unsafe.Pointer(in.LabelPreference))
    out.RequestedToCapacityRatioArguments = (*config.RequestedToCapacityRatioArguments)(unsafe.Pointer(in.RequestedToCapacityRatioArguments))
    return nil

@@ -395,7 +372,6 @@ func Convert_v1_PriorityArgument_To_config_PriorityArgument(in *v1.PriorityArgum
}

func autoConvert_config_PriorityArgument_To_v1_PriorityArgument(in *config.PriorityArgument, out *v1.PriorityArgument, s conversion.Scope) error {
    out.ServiceAntiAffinity = (*v1.ServiceAntiAffinity)(unsafe.Pointer(in.ServiceAntiAffinity))
    out.LabelPreference = (*v1.LabelPreference)(unsafe.Pointer(in.LabelPreference))
    out.RequestedToCapacityRatioArguments = (*v1.RequestedToCapacityRatioArguments)(unsafe.Pointer(in.RequestedToCapacityRatioArguments))
    return nil

@@ -474,46 +450,6 @@ func Convert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out
    return autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in, out, s)
}

func autoConvert_v1_ServiceAffinity_To_config_ServiceAffinity(in *v1.ServiceAffinity, out *config.ServiceAffinity, s conversion.Scope) error {
    out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels))
    return nil
}

// Convert_v1_ServiceAffinity_To_config_ServiceAffinity is an autogenerated conversion function.
func Convert_v1_ServiceAffinity_To_config_ServiceAffinity(in *v1.ServiceAffinity, out *config.ServiceAffinity, s conversion.Scope) error {
    return autoConvert_v1_ServiceAffinity_To_config_ServiceAffinity(in, out, s)
}

func autoConvert_config_ServiceAffinity_To_v1_ServiceAffinity(in *config.ServiceAffinity, out *v1.ServiceAffinity, s conversion.Scope) error {
    out.Labels = *(*[]string)(unsafe.Pointer(&in.Labels))
    return nil
}

// Convert_config_ServiceAffinity_To_v1_ServiceAffinity is an autogenerated conversion function.
func Convert_config_ServiceAffinity_To_v1_ServiceAffinity(in *config.ServiceAffinity, out *v1.ServiceAffinity, s conversion.Scope) error {
    return autoConvert_config_ServiceAffinity_To_v1_ServiceAffinity(in, out, s)
}

func autoConvert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(in *v1.ServiceAntiAffinity, out *config.ServiceAntiAffinity, s conversion.Scope) error {
    out.Label = in.Label
    return nil
}

// Convert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity is an autogenerated conversion function.
func Convert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(in *v1.ServiceAntiAffinity, out *config.ServiceAntiAffinity, s conversion.Scope) error {
    return autoConvert_v1_ServiceAntiAffinity_To_config_ServiceAntiAffinity(in, out, s)
}

func autoConvert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(in *config.ServiceAntiAffinity, out *v1.ServiceAntiAffinity, s conversion.Scope) error {
    out.Label = in.Label
    return nil
}

// Convert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity is an autogenerated conversion function.
func Convert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(in *config.ServiceAntiAffinity, out *v1.ServiceAntiAffinity, s conversion.Scope) error {
    return autoConvert_config_ServiceAntiAffinity_To_v1_ServiceAntiAffinity(in, out, s)
}

func autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error {
    out.Utilization = in.Utilization
    out.Score = in.Score

@@ -363,10 +363,6 @@ func validateCustomPriorities(priorities map[string]config.PriorityPolicy, prior
        if err := verifyDifferentWeights("LabelPreference"); err != nil {
            return err
        }
    } else if priority.Argument.ServiceAntiAffinity != nil {
        if err := verifyDifferentWeights("ServiceAntiAffinity"); err != nil {
            return err
        }
    } else if priority.Argument.RequestedToCapacityRatioArguments != nil {
        if err := verifyRedeclaration("RequestedToCapacityRatio"); err != nil {
            return err
@@ -502,7 +502,7 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) {
    badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "ServiceAffinity", Weight: 2})

    badRemovedPlugins2 := validConfig.DeepCopy()
    badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "ServiceAffinity", Weight: 2})
    badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "RequestedToCapacityRatio", Weight: 2})

    badRemovedPlugins3 := validConfig.DeepCopy()
    badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesMostAllocated", Weight: 2})

@@ -731,16 +731,6 @@ func TestValidatePolicy(t *testing.T) {
        },
        expected: errors.New("LabelPreference priority \"customPriority2\" has a different weight with \"customPriority1\""),
    },
    {
        name: "different weights for ServiceAntiAffinity custom priority",
        policy: config.Policy{
            Priorities: []config.PriorityPolicy{
                {Name: "customPriority1", Weight: 1, Argument: &config.PriorityArgument{ServiceAntiAffinity: &config.ServiceAntiAffinity{}}},
                {Name: "customPriority2", Weight: 2, Argument: &config.PriorityArgument{ServiceAntiAffinity: &config.ServiceAntiAffinity{}}},
            },
        },
        expected: errors.New("ServiceAntiAffinity priority \"customPriority2\" has a different weight with \"customPriority1\""),
    },
    {
        name: "invalid hardPodAffinitySymmetricWeight, above the range",
        policy: config.Policy{

pkg/scheduler/apis/config/zz_generated.deepcopy.go (generated, 82 changed lines)

@@ -631,11 +631,6 @@ func (in *Policy) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) {
    *out = *in
    if in.ServiceAffinity != nil {
        in, out := &in.ServiceAffinity, &out.ServiceAffinity
        *out = new(ServiceAffinity)
        (*in).DeepCopyInto(*out)
    }
    if in.LabelsPresence != nil {
        in, out := &in.LabelsPresence, &out.LabelsPresence
        *out = new(LabelsPresence)

@@ -678,11 +673,6 @@ func (in *PredicatePolicy) DeepCopy() *PredicatePolicy {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) {
    *out = *in
    if in.ServiceAntiAffinity != nil {
        in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity
        *out = new(ServiceAntiAffinity)
        **out = **in
    }
    if in.LabelPreference != nil {
        in, out := &in.LabelPreference, &out.LabelPreference
        *out = new(LabelPreference)

@@ -851,78 +841,6 @@ func (in *ScoringStrategy) DeepCopy() *ScoringStrategy {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) {
    *out = *in
    if in.Labels != nil {
        in, out := &in.Labels, &out.Labels
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity.
func (in *ServiceAffinity) DeepCopy() *ServiceAffinity {
    if in == nil {
        return nil
    }
    out := new(ServiceAffinity)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAffinityArgs) DeepCopyInto(out *ServiceAffinityArgs) {
    *out = *in
    out.TypeMeta = in.TypeMeta
    if in.AffinityLabels != nil {
        in, out := &in.AffinityLabels, &out.AffinityLabels
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.AntiAffinityLabelsPreference != nil {
        in, out := &in.AntiAffinityLabelsPreference, &out.AntiAffinityLabelsPreference
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinityArgs.
func (in *ServiceAffinityArgs) DeepCopy() *ServiceAffinityArgs {
    if in == nil {
        return nil
    }
    out := new(ServiceAffinityArgs)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceAffinityArgs) DeepCopyObject() runtime.Object {
    if c := in.DeepCopy(); c != nil {
        return c
    }
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity.
func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity {
    if in == nil {
        return nil
    }
    out := new(ServiceAntiAffinity)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) {
    *out = *in

@@ -399,11 +399,6 @@ func addAllEventHandlers(
            },
        )
    }
    case framework.Service:
        // ServiceAffinity: affected when the selector of the service is updated.
        informerFactory.Core().V1().Services().Informer().AddEventHandler(
            buildEvtResHandler(at, framework.Service, "Service"),
        )
    default:
        // Tests may not instantiate dynInformerFactory.
        if dynInformerFactory == nil {

@@ -20,7 +20,7 @@ import (
    "fmt"
    "sort"

    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/klog/v2"

@@ -38,7 +38,6 @@ import (
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/selectorspread"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"

@@ -106,8 +105,6 @@ const (
    CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
    // CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
    CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
    // CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
    CheckServiceAffinityPred = "CheckServiceAffinity"
    // MaxEBSVolumeCountPred defines the name of predicate MaxEBSVolumeCount.
    // DEPRECATED
    // All cloudprovider specific predicates are deprecated in favour of MaxCSIVolumeCountPred.

@@ -138,7 +135,7 @@ var predicateOrdering = []string{
    GeneralPred, HostNamePred, PodFitsHostPortsPred,
    MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
    PodToleratesNodeTaintsPred, CheckNodeLabelPresencePred,
    CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
    MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
    MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
    EvenPodsSpreadPred, MatchInterPodAffinityPred,
}

@@ -166,8 +163,6 @@ type ConfigProducerArgs struct {
    NodeLabelArgs *config.NodeLabelArgs
    // RequestedToCapacityRatioArgs is the args for the RequestedToCapacityRatio plugin.
    RequestedToCapacityRatioArgs *config.RequestedToCapacityRatioArgs
    // ServiceAffinityArgs is the args for the ServiceAffinity plugin.
    ServiceAffinityArgs *config.ServiceAffinityArgs
    // NodeResourcesFitArgs is the args for the NodeResources fit filter.
    NodeResourcesFitArgs *config.NodeResourcesFitArgs
    // InterPodAffinityArgs is the args for InterPodAffinity plugin

@@ -315,15 +310,6 @@ func NewLegacyRegistry() *LegacyRegistry {
                config.PluginConfig{Name: nodelabel.Name, Args: args.NodeLabelArgs})
            }
        })
    registry.registerPredicateConfigProducer(CheckServiceAffinityPred,
        func(args ConfigProducerArgs, plugins *config.Plugins, pluginConfig *[]config.PluginConfig) {
            plugins.Filter = appendToPluginSet(plugins.Filter, serviceaffinity.Name, nil)
            if args.ServiceAffinityArgs != nil {
                *pluginConfig = append(*pluginConfig,
                    config.PluginConfig{Name: serviceaffinity.Name, Args: args.ServiceAffinityArgs})
            }
            plugins.PreFilter = appendToPluginSet(plugins.PreFilter, serviceaffinity.Name, nil)
        })
    registry.registerPredicateConfigProducer(EvenPodsSpreadPred,
        func(_ ConfigProducerArgs, plugins *config.Plugins, _ *[]config.PluginConfig) {
            plugins.PreFilter = appendToPluginSet(plugins.PreFilter, podtopologyspread.Name, nil)

@@ -430,18 +416,6 @@ func NewLegacyRegistry() *LegacyRegistry {
                config.PluginConfig{Name: nodelabel.Name, Args: args.NodeLabelArgs})
            }
        })
    registry.registerPriorityConfigProducer(serviceaffinity.Name,
        func(args ConfigProducerArgs, plugins *config.Plugins, pluginConfig *[]config.PluginConfig) {
            // If there are n ServiceAffinity priorities in the policy, the weight for the corresponding
            // score plugin is n*weight (note that the validation logic verifies that all ServiceAffinity
            // priorities specified in Policy have the same weight).
            weight := args.Weight * int32(len(args.ServiceAffinityArgs.AntiAffinityLabelsPreference))
            plugins.Score = appendToPluginSet(plugins.Score, serviceaffinity.Name, &weight)
            if args.ServiceAffinityArgs != nil {
                *pluginConfig = append(*pluginConfig,
                    config.PluginConfig{Name: serviceaffinity.Name, Args: args.ServiceAffinityArgs})
            }
        })
    registry.registerPriorityConfigProducer(EvenPodsSpreadPriority,
        func(args ConfigProducerArgs, plugins *config.Plugins, pluginConfig *[]config.PluginConfig) {
            plugins.PreScore = appendToPluginSet(plugins.PreScore, podtopologyspread.Name, nil)
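[Editor's note] A note on the weight arithmetic in the removed producer above: each ServiceAntiAffinity priority in a Policy contributes one label to AntiAffinityLabelsPreference, and validation enforces one shared weight, so the single score plugin takes n*weight. A small sketch of that arithmetic with illustrative values:

// Two ServiceAntiAffinity priorities (labels "zone" and "rack"), each weight 2.
weight := int32(2)
antiAffinityLabelsPreference := []string{"zone", "rack"}
// The single ServiceAffinity score plugin replaces both priorities,
// so its weight becomes n*weight = 2*2 = 4.
pluginWeight := weight * int32(len(antiAffinityLabelsPreference))
_ = pluginWeight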
@@ -579,25 +553,10 @@ func (lr *LegacyRegistry) ProcessPredicatePolicy(policy config.PredicatePolicy,
        return predicateName, nil
    }

    if policy.Argument == nil || (policy.Argument.ServiceAffinity == nil &&
        policy.Argument.LabelsPresence == nil) {
    if policy.Argument == nil || policy.Argument.LabelsPresence == nil {
        return "", fmt.Errorf("predicate type not found for %q", predicateName)
    }

    // generate the predicate function, if a custom type is requested
    if policy.Argument.ServiceAffinity != nil {
        // map LabelsPresence policy to ConfigProducerArgs that's used to configure the ServiceAffinity plugin.
        if pluginArgs.ServiceAffinityArgs == nil {
            pluginArgs.ServiceAffinityArgs = &config.ServiceAffinityArgs{}
        }
        pluginArgs.ServiceAffinityArgs.AffinityLabels = append(pluginArgs.ServiceAffinityArgs.AffinityLabels, policy.Argument.ServiceAffinity.Labels...)

        // We use the ServiceAffinity predicate name for all ServiceAffinity custom predicates.
        // It may get called multiple times but we essentially only register one instance of ServiceAffinity predicate.
        // This name is then used to find the registered plugin and run the plugin instead of the predicate.
        predicateName = CheckServiceAffinityPred
    }

    if policy.Argument.LabelsPresence != nil {
        // Map LabelPresence policy to ConfigProducerArgs that's used to configure the NodeLabel plugin.
        if pluginArgs.NodeLabelArgs == nil {
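[Editor's note] For reference, this is the Policy shape that used to reach the removed ServiceAffinity branch above; a sketch using the types shown earlier in this diff, with illustrative names and values:

policy := config.PredicatePolicy{
    Name: "customServiceAffinity",
    Argument: &config.PredicateArgument{
        // After this commit, a non-nil ServiceAffinity argument no longer maps
        // to a plugin; only LabelsPresence remains supported here.
        ServiceAffinity: &config.ServiceAffinity{Labels: []string{"region"}},
    },
}
_ = policy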
@@ -638,27 +597,11 @@ func (lr *LegacyRegistry) ProcessPriorityPolicy(policy config.PriorityPolicy, co

    // generate the priority function, if a custom priority is requested
    if policy.Argument == nil ||
        (policy.Argument.ServiceAntiAffinity == nil &&
            policy.Argument.RequestedToCapacityRatioArguments == nil &&
        (policy.Argument.RequestedToCapacityRatioArguments == nil &&
            policy.Argument.LabelPreference == nil) {
        return "", fmt.Errorf("priority type not found for %q", priorityName)
    }

    if policy.Argument.ServiceAntiAffinity != nil {
        // We use the ServiceAffinity plugin name for all ServiceAffinity custom priorities.
        // It may get called multiple times but we essentially only register one instance of
        // ServiceAffinity priority.
        // This name is then used to find the registered plugin and run the plugin instead of the priority.
        priorityName = serviceaffinity.Name
        if configProducerArgs.ServiceAffinityArgs == nil {
            configProducerArgs.ServiceAffinityArgs = &config.ServiceAffinityArgs{}
        }
        configProducerArgs.ServiceAffinityArgs.AntiAffinityLabelsPreference = append(
            configProducerArgs.ServiceAffinityArgs.AntiAffinityLabelsPreference,
            policy.Argument.ServiceAntiAffinity.Label,
        )
    }

    if policy.Argument.LabelPreference != nil {
        // We use the NodeLabel plugin name for all NodeLabel custom priorities.
        // It may get called multiple times but we essentially only register one instance of NodeLabel priority.

@@ -712,9 +655,6 @@ func (lr *LegacyRegistry) ProcessPriorityPolicy(policy config.PriorityPolicy, co
func validatePredicate(predicate config.PredicatePolicy) error {
    if predicate.Argument != nil {
        numArgs := 0
        if predicate.Argument.ServiceAffinity != nil {
            numArgs++
        }
        if predicate.Argument.LabelsPresence != nil {
            numArgs++
        }

@@ -728,9 +668,6 @@ func validatePredicate(predicate config.PredicatePolicy) error {
func validatePriority(priority config.PriorityPolicy) error {
    if priority.Argument != nil {
        numArgs := 0
        if priority.Argument.ServiceAntiAffinity != nil {
            numArgs++
        }
        if priority.Argument.LabelPreference != nil {
            numArgs++
        }

@@ -35,7 +35,6 @@ import (
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/selectorspread"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"

@@ -82,7 +81,6 @@ func NewInTreeRegistry() runtime.Registry {
    nodevolumelimits.CinderName: runtime.FactoryAdapter(fts, nodevolumelimits.NewCinder),
    interpodaffinity.Name: runtime.FactoryAdapter(fts, interpodaffinity.New),
    nodelabel.Name: nodelabel.New,
    serviceaffinity.Name: serviceaffinity.New,
    queuesort.Name: queuesort.New,
    defaultbinder.Name: defaultbinder.New,
    defaultpreemption.Name: runtime.FactoryAdapter(fts, defaultpreemption.New),

@@ -1,463 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This plugin has been deprecated and is only configurable through the
// scheduler policy API and the v1beta1 component config API. It is recommended
// to use the InterPodAffinity plugin instead.
package serviceaffinity

import (
    "context"
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    corelisters "k8s.io/client-go/listers/core/v1"
    "k8s.io/klog/v2"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/framework"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)

const (
    // Name is the name of the plugin used in the plugin registry and configurations.
    Name = names.ServiceAffinity

    // preFilterStateKey is the key in CycleState to ServiceAffinity pre-computed data.
    // Using the name of the plugin will likely help us avoid collisions with other plugins.
    preFilterStateKey = "PreFilter" + Name

    // ErrReason is used for CheckServiceAffinity predicate error.
    ErrReason = "node(s) didn't match service affinity"
)

// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
    matchingPodList     []*v1.Pod
    matchingPodServices []*v1.Service
}

// Clone the prefilter state.
func (s *preFilterState) Clone() framework.StateData {
    if s == nil {
        return nil
    }

    copy := preFilterState{}
    copy.matchingPodServices = append([]*v1.Service(nil),
        s.matchingPodServices...)
    copy.matchingPodList = append([]*v1.Pod(nil),
        s.matchingPodList...)

    return &copy
}

// New initializes a new plugin and returns it.
func New(plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) {
    args, err := getArgs(plArgs)
    if err != nil {
        return nil, err
    }
    serviceLister := handle.SharedInformerFactory().Core().V1().Services().Lister()

    klog.Warning("ServiceAffinity plugin is deprecated and will be removed in a future version; use InterPodAffinity instead")
    return &ServiceAffinity{
        sharedLister:  handle.SnapshotSharedLister(),
        serviceLister: serviceLister,
        args:          args,
    }, nil
}

func getArgs(obj runtime.Object) (config.ServiceAffinityArgs, error) {
    ptr, ok := obj.(*config.ServiceAffinityArgs)
    if !ok {
        return config.ServiceAffinityArgs{}, fmt.Errorf("want args to be of type ServiceAffinityArgs, got %T", obj)
    }
    return *ptr, nil
}

// ServiceAffinity is a plugin that checks service affinity.
type ServiceAffinity struct {
    args          config.ServiceAffinityArgs
    sharedLister  framework.SharedLister
    serviceLister corelisters.ServiceLister
}

var _ framework.PreFilterPlugin = &ServiceAffinity{}
var _ framework.FilterPlugin = &ServiceAffinity{}
var _ framework.ScorePlugin = &ServiceAffinity{}
var _ framework.EnqueueExtensions = &ServiceAffinity{}

// Name returns name of the plugin. It is used in logs, etc.
func (pl *ServiceAffinity) Name() string {
    return Name
}

func (pl *ServiceAffinity) createPreFilterState(pod *v1.Pod) (*preFilterState, error) {
    if pod == nil {
        return nil, fmt.Errorf("a pod is required to calculate service affinity preFilterState")
    }
    // Store services which match the pod.
    matchingPodServices, err := helper.GetPodServices(pl.serviceLister, pod)
    if err != nil {
        return nil, fmt.Errorf("listing pod services: %w", err)
    }
    selector := createSelectorFromLabels(pod.Labels)

    // consider only the pods that belong to the same namespace
    nodeInfos, err := pl.sharedLister.NodeInfos().List()
    if err != nil {
        return nil, fmt.Errorf("listing nodeInfos: %w", err)
    }
    matchingPodList := filterPods(nodeInfos, selector, pod.Namespace)

    return &preFilterState{
        matchingPodList:     matchingPodList,
        matchingPodServices: matchingPodServices,
    }, nil
}

// PreFilter invoked at the prefilter extension point.
func (pl *ServiceAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
    if len(pl.args.AffinityLabels) == 0 {
        return nil
    }

    s, err := pl.createPreFilterState(pod)
    if err != nil {
        return framework.AsStatus(fmt.Errorf("could not create preFilterState: %w", err))
    }
    cycleState.Write(preFilterStateKey, s)
    return nil
}

// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *ServiceAffinity) PreFilterExtensions() framework.PreFilterExtensions {
    if len(pl.args.AffinityLabels) == 0 {
        return nil
    }
    return pl
}

// AddPod from pre-computed data in cycleState.
func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToAdd *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
    s, err := getPreFilterState(cycleState)
    if err != nil {
        return framework.AsStatus(err)
    }

    // If addedPod is in the same namespace as the pod, update the list
    // of matching pods if applicable.
    if podInfoToAdd.Pod.Namespace != podToSchedule.Namespace {
        return nil
    }

    selector := createSelectorFromLabels(podToSchedule.Labels)
    if selector.Matches(labels.Set(podInfoToAdd.Pod.Labels)) {
        s.matchingPodList = append(s.matchingPodList, podInfoToAdd.Pod)
    }

    return nil
}

// RemovePod from pre-computed data in cycleState.
func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podInfoToRemove *framework.PodInfo, nodeInfo *framework.NodeInfo) *framework.Status {
    s, err := getPreFilterState(cycleState)
    if err != nil {
        return framework.AsStatus(err)
    }

    if len(s.matchingPodList) == 0 ||
        podInfoToRemove.Pod.Namespace != s.matchingPodList[0].Namespace {
        return nil
    }

    for i, pod := range s.matchingPodList {
        if pod.Name == podInfoToRemove.Pod.Name && pod.Namespace == podInfoToRemove.Pod.Namespace {
            s.matchingPodList = append(s.matchingPodList[:i], s.matchingPodList[i+1:]...)
            break
        }
    }

    return nil
}

func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
    c, err := cycleState.Read(preFilterStateKey)
    if err != nil {
        // preFilterState doesn't exist, likely PreFilter wasn't invoked.
        return nil, fmt.Errorf("error reading %q from cycleState: %w", preFilterStateKey, err)
    }

    if c == nil {
        return nil, nil
    }

    s, ok := c.(*preFilterState)
    if !ok {
        return nil, fmt.Errorf("%+v convert to interpodaffinity.state error", c)
    }
    return s, nil
}

// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *ServiceAffinity) EventsToRegister() []framework.ClusterEvent {
    if len(pl.args.AffinityLabels) == 0 {
        return nil
    }

    return []framework.ClusterEvent{
        // Suppose there is a running Pod that backs a Service, and the unschedulable Pod subjects
        // to the same Service, but failed because of mis-matched affinity labels.
        // - if the running Pod's labels get updated, it may not back the Service anymore, and
        //   hence make the unschedulable Pod schedulable.
        // - if the running Pod gets deleted, the unschedulable Pod may also become schedulable.
        {Resource: framework.Pod, ActionType: framework.Update | framework.Delete},
        // A new Node or updating a Node's labels may make a Pod schedulable.
        {Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel},
        // Update or delete of a Service may break the correlation of the Pods that previously
        // backed it, and hence make a Pod schedulable.
        {Resource: framework.Service, ActionType: framework.Update | framework.Delete},
    }
}

// Filter matches nodes in such a way to force that
// ServiceAffinity.labels are homogeneous for pods that are scheduled to a node.
// (i.e. it returns true IFF this pod can be added to this node such that all other pods in
// the same service are running on nodes with the exact same ServiceAffinity.label values).
//
// For example:
// If the first pod of a service was scheduled to a node with label "region=foo",
// all the other subsequent pods belonging to the same service will be scheduled on
// nodes with the same "region=foo" label.
//
// Details:
//
// If (the svc affinity labels are not a subset of pod's label selectors)
// The pod has all information necessary to check affinity, the pod's label selector is sufficient to calculate
// the match.
// Otherwise:
// Create an "implicit selector" which guarantees pods will land on nodes with similar values
// for the affinity labels.
//
// To do this, we "reverse engineer" a selector by introspecting existing pods running under the same service+namespace.
// These backfilled labels in the selector "L" are defined like so:
// - L is a label that the ServiceAffinity object needs as a matching constraint.
// - L is not defined in the pod itself already.
// - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value.
func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
    if len(pl.args.AffinityLabels) == 0 {
        return nil
    }

    node := nodeInfo.Node()
    if node == nil {
        return framework.AsStatus(fmt.Errorf("node not found"))
    }

    s, err := getPreFilterState(cycleState)
    if err != nil {
        return framework.AsStatus(err)
    }

    pods, services := s.matchingPodList, s.matchingPodServices
    filteredPods := nodeInfo.FilterOutPods(pods)
    // check if the pod being scheduled has the affinity labels specified in its NodeSelector
    affinityLabels := findLabelsInSet(pl.args.AffinityLabels, labels.Set(pod.Spec.NodeSelector))
    // Step 1: If we don't have all constraints, introspect nodes to find the missing constraints.
    if len(pl.args.AffinityLabels) > len(affinityLabels) {
        if len(services) > 0 {
            if len(filteredPods) > 0 {
                nodeWithAffinityLabels, err := pl.sharedLister.NodeInfos().Get(filteredPods[0].Spec.NodeName)
                if err != nil {
                    return framework.AsStatus(fmt.Errorf("node not found"))
                }
                addUnsetLabelsToMap(affinityLabels, pl.args.AffinityLabels, labels.Set(nodeWithAffinityLabels.Node().Labels))
            }
        }
    }
    // Step 2: Finally complete the affinity predicate based on whatever set of predicates we were able to find.
    if createSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) {
        return nil
    }

    return framework.NewStatus(framework.Unschedulable, ErrReason)
}
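[Editor's note] To make the backfill described above concrete, a worked sketch (not from the original file) using findLabelsInSet and addUnsetLabelsToMap, the helpers defined at the bottom of this file, with made-up labels:

// Pod pins "region" via NodeSelector but says nothing about "zone".
affinityLabels := findLabelsInSet([]string{"region", "zone"},
    labels.Set(map[string]string{"region": "r1"}))
// A node already running a pod of the same service supplies the missing "zone".
addUnsetLabelsToMap(affinityLabels, []string{"region", "zone"},
    labels.Set(map[string]string{"region": "r1", "zone": "z11"}))
// affinityLabels == map[region:r1 zone:z11]; only nodes matching both values fit.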
// Score invoked at the Score extension point.
func (pl *ServiceAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
    nodeInfo, err := pl.sharedLister.NodeInfos().Get(nodeName)
    if err != nil {
        return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
    }

    node := nodeInfo.Node()
    if node == nil {
        return 0, framework.AsStatus(fmt.Errorf("node not found"))
    }

    // Pods matched namespace,selector on current node.
    var selector labels.Selector
    if services, err := helper.GetPodServices(pl.serviceLister, pod); err == nil && len(services) > 0 {
        selector = labels.SelectorFromSet(services[0].Spec.Selector)
    } else {
        selector = labels.NewSelector()
    }

    if len(nodeInfo.Pods) == 0 || selector.Empty() {
        return 0, nil
    }
    var score int64
    for _, existingPod := range nodeInfo.Pods {
        // Ignore pods being deleted for spreading purposes
        // Similar to how it is done for SelectorSpreadPriority
        if pod.Namespace == existingPod.Pod.Namespace && existingPod.Pod.DeletionTimestamp == nil {
            if selector.Matches(labels.Set(existingPod.Pod.Labels)) {
                score++
            }
        }
    }

    return score, nil
}

// NormalizeScore invoked after scoring all nodes.
func (pl *ServiceAffinity) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
    reduceResult := make([]float64, len(scores))
    for _, label := range pl.args.AntiAffinityLabelsPreference {
        if err := pl.updateNodeScoresForLabel(pl.sharedLister, scores, reduceResult, label); err != nil {
            return framework.AsStatus(err)
        }
    }

    // Update the result after all labels have been evaluated.
    for i, nodeScore := range reduceResult {
        scores[i].Score = int64(nodeScore)
    }
    return nil
}

// updateNodeScoresForLabel updates the node scores for a single label. Note it does not update the
// original result from the map phase directly, but instead updates the reduceResult, which is used
// to update the original result finally. This makes sure that each call to updateNodeScoresForLabel
// receives the same mapResult to work with.
// Why are we doing this? This is a workaround for the migration from priorities to score plugins.
// Historically the priority is designed to handle only one label, and multiple priorities are configured
// to work with multiple labels. Using multiple plugins is not allowed in the new framework. Therefore
// we need to modify the old priority to be able to handle multiple labels so that it can be mapped
// to a single plugin.
// TODO: This will be deprecated soon.
func (pl *ServiceAffinity) updateNodeScoresForLabel(sharedLister framework.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error {
    var numServicePods int64
    var labelValue string
    podCounts := map[string]int64{}
    labelNodesStatus := map[string]string{}
    maxPriorityFloat64 := float64(framework.MaxNodeScore)

    for _, nodePriority := range mapResult {
        numServicePods += nodePriority.Score
        nodeInfo, err := sharedLister.NodeInfos().Get(nodePriority.Name)
        if err != nil {
            return err
        }
        if !labels.Set(nodeInfo.Node().Labels).Has(label) {
            continue
        }

        labelValue = labels.Set(nodeInfo.Node().Labels).Get(label)
        labelNodesStatus[nodePriority.Name] = labelValue
        podCounts[labelValue] += nodePriority.Score
    }

    // score int - scale of 0-maxPriority
    // 0 being the lowest priority and maxPriority being the highest
    for i, nodePriority := range mapResult {
        labelValue, ok := labelNodesStatus[nodePriority.Name]
        if !ok {
            continue
        }
        // initializing to the default/max node score of maxPriority
        fScore := maxPriorityFloat64
        if numServicePods > 0 {
            fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[labelValue]) / float64(numServicePods))
        }
        // The score of current label only accounts for 1/len(s.labels) of the total score.
        // The policy API definition only allows a single label to be configured, associated with a weight.
        // This is compensated by the fact that the total weight is the sum of all weights configured
        // in each policy config.
        reduceResult[i] += fScore / float64(len(pl.args.AntiAffinityLabelsPreference))
    }

    return nil
}
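[Editor's note] A numeric sketch of the reduce step above, with illustrative values: one preference label and four service pods in total, three of which sit in the candidate node's label-value group.

maxScore := float64(framework.MaxNodeScore) // 100
numServicePods, podsInGroup := int64(4), int64(3)
fScore := maxScore * float64(numServicePods-podsInGroup) / float64(numServicePods)
// fScore == 25; with a single label the divisor len(AntiAffinityLabelsPreference)
// is 1, so the node's normalized score is 25. Crowded groups score low, which
// is what spreads the service across label values.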
|
||||
|
||||
// ScoreExtensions of the Score plugin.
|
||||
func (pl *ServiceAffinity) ScoreExtensions() framework.ScoreExtensions {
|
||||
return pl
|
||||
}
|
||||
|
||||
// addUnsetLabelsToMap backfills missing values with values we find in a map.
|
||||
func addUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set) {
|
||||
for _, l := range labelsToAdd {
|
||||
// if the label is already there, don't overwrite it.
|
||||
if _, exists := aL[l]; exists {
|
||||
continue
|
||||
}
|
||||
// otherwise, backfill this label.
|
||||
if labelSet.Has(l) {
|
||||
aL[l] = labelSet.Get(l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// createSelectorFromLabels is used to define a selector that corresponds to the keys in a map.
|
||||
func createSelectorFromLabels(aL map[string]string) labels.Selector {
|
||||
if len(aL) == 0 {
|
||||
return labels.Everything()
|
||||
}
|
||||
return labels.Set(aL).AsSelector()
|
||||
}
|
||||
|
||||
// filterPods filters pods outside a namespace from the given list.
|
||||
func filterPods(nodeInfos []*framework.NodeInfo, selector labels.Selector, ns string) []*v1.Pod {
|
||||
maxSize := 0
|
||||
for _, n := range nodeInfos {
|
||||
maxSize += len(n.Pods)
|
||||
}
|
||||
pods := make([]*v1.Pod, 0, maxSize)
|
||||
for _, n := range nodeInfos {
|
||||
for _, p := range n.Pods {
|
||||
if p.Pod.Namespace == ns && selector.Matches(labels.Set(p.Pod.Labels)) {
|
||||
pods = append(pods, p.Pod)
|
||||
}
|
||||
}
|
||||
}
|
||||
return pods
|
||||
}
|
||||
|
||||
// findLabelsInSet gets as many key/value pairs as possible out of a label set.
|
||||
func findLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string {
|
||||
aL := make(map[string]string)
|
||||
for _, l := range labelsToKeep {
|
||||
if selector.Has(l) {
|
||||
aL[l] = selector.Get(l)
|
||||
}
|
||||
}
|
||||
return aL
|
||||
}
|
@ -1,625 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package serviceaffinity
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/fake"
|
||||
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
)
|
||||
|
||||
func TestServiceAffinity(t *testing.T) {
|
||||
selector := map[string]string{"foo": "bar"}
|
||||
labels1 := map[string]string{
|
||||
"region": "r1",
|
||||
"zone": "z11",
|
||||
}
|
||||
labels2 := map[string]string{
|
||||
"region": "r1",
|
||||
"zone": "z12",
|
||||
}
|
||||
labels3 := map[string]string{
|
||||
"region": "r2",
|
||||
"zone": "z21",
|
||||
}
|
||||
labels4 := map[string]string{
|
||||
"region": "r2",
|
||||
"zone": "z22",
|
||||
}
|
||||
node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labels1}}
|
||||
node2 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labels2}}
|
||||
node3 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labels3}}
|
||||
node4 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labels4}}
|
||||
node5 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labels4}}
|
||||
tests := []struct {
|
||||
name string
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
services []*v1.Service
|
||||
node *v1.Node
|
||||
labels []string
|
||||
res framework.Code
|
||||
}{
|
||||
{
|
||||
name: "nothing scheduled",
|
||||
pod: new(v1.Pod),
|
||||
node: &node1,
|
||||
labels: []string{"region"},
|
||||
res: framework.Success,
|
||||
},
|
||||
{
|
||||
name: "pod with region label match",
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeSelector: map[string]string{"region": "r1"}}},
|
||||
node: &node1,
|
||||
labels: []string{"region"},
|
||||
res: framework.Success,
|
||||
},
|
||||
{
|
||||
name: "pod with region label mismatch",
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeSelector: map[string]string{"region": "r2"}}},
|
||||
node: &node1,
|
||||
labels: []string{"region"},
|
||||
res: framework.Unschedulable,
|
||||
},
|
||||
{
|
||||
name: "service pod on same node",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, UID: types.UID("pod2")}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
labels: []string{"region"},
|
||||
res: framework.Success,
|
||||
},
|
||||
{
|
||||
name: "service pod on different node, region match",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, UID: types.UID("pod2")}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
labels: []string{"region"},
|
||||
res: framework.Success,
|
||||
},
|
||||
{
|
||||
name: "service pod on different node, region mismatch",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, UID: types.UID("pod2")}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
labels: []string{"region"},
|
||||
res: framework.Unschedulable,
|
||||
},
|
||||
{
|
||||
name: "service in different namespace, region mismatch",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, Namespace: "ns1", UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, Namespace: "ns1", UID: types.UID("pod2")}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns2"}}},
|
||||
labels: []string{"region"},
|
||||
res: framework.Success,
|
||||
},
|
||||
{
|
||||
name: "pod in different namespace, region mismatch",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, Namespace: "ns1", UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, Namespace: "ns2", UID: types.UID("pod2")}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
|
||||
labels: []string{"region"},
|
||||
res: framework.Success,
|
||||
},
|
||||
{
|
||||
name: "service and pod in same namespace, region mismatch",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, Namespace: "ns1", UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, Namespace: "ns1", UID: types.UID("pod2")}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
|
||||
labels: []string{"region"},
|
||||
res: framework.Unschedulable,
|
||||
},
|
||||
{
|
||||
name: "service pod on different node, multiple labels, not all match",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, UID: types.UID("pod2")}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
labels: []string{"region", "zone"},
|
||||
res: framework.Unschedulable,
|
||||
},
|
||||
{
|
||||
name: "service pod on different node, multiple labels, all match",
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Labels: selector, UID: types.UID("pod1")}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Name: "pod2", Labels: selector, UID: types.UID("pod2")}}},
|
||||
node: &node4,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
labels: []string{"region", "zone"},
|
||||
res: framework.Success,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5}
|
||||
snapshot := cache.NewSnapshot(test.pods, nodes)
|
||||
|
||||
p := &ServiceAffinity{
|
||||
sharedLister: snapshot,
|
||||
serviceLister: fakeframework.ServiceLister(test.services),
|
||||
args: config.ServiceAffinityArgs{
|
||||
AffinityLabels: test.labels,
|
||||
},
|
||||
}
|
||||
|
||||
state := framework.NewCycleState()
|
||||
if s := p.PreFilter(context.Background(), state, test.pod); !s.IsSuccess() {
|
||||
t.Errorf("PreFilter failed: %v", s.Message())
|
||||
}
|
||||
nodeInfo := mustGetNodeInfo(t, snapshot, test.node.Name)
|
||||
status := p.Filter(context.Background(), state, test.pod, nodeInfo)
|
||||
if status.Code() != test.res {
|
||||
t.Errorf("Status mismatch. got: %v, want: %v", status.Code(), test.res)
|
||||
}
|
||||
})
|
||||
}
|
||||
}

func TestServiceAffinityScore(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	zone1 := map[string]string{
		"zone": "zone1",
	}
	zone1Rack1 := map[string]string{
		"zone": "zone1",
		"rack": "rack1",
	}
	zone1Rack2 := map[string]string{
		"zone": "zone1",
		"rack": "rack2",
	}
	zone2 := map[string]string{
		"zone": "zone2",
	}
	zone2Rack1 := map[string]string{
		"zone": "zone2",
		"rack": "rack1",
	}
	nozone := map[string]string{
		"name": "value",
	}
	zone0Spec := v1.PodSpec{
		NodeName: "machine01",
	}
	zone1Spec := v1.PodSpec{
		NodeName: "machine11",
	}
	zone2Spec := v1.PodSpec{
		NodeName: "machine21",
	}
	labeledNodes := map[string]map[string]string{
		"machine01": nozone, "machine02": nozone,
		"machine11": zone1, "machine12": zone1,
		"machine21": zone2, "machine22": zone2,
	}
	nodesWithZoneAndRackLabels := map[string]map[string]string{
		"machine01": nozone, "machine02": nozone,
		"machine11": zone1Rack1, "machine12": zone1Rack2,
		"machine21": zone2Rack1, "machine22": zone2Rack1,
	}
	tests := []struct {
		pod          *v1.Pod
		pods         []*v1.Pod
		nodes        map[string]map[string]string
		services     []*v1.Service
		labels       []string
		expectedList framework.NodeScoreList
		name         string
	}{
		{
			pod: new(v1.Pod),
			nodes: labeledNodes,
			labels: []string{"zone"},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
				{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "nothing scheduled",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{{Spec: zone1Spec}},
			nodes: labeledNodes,
			labels: []string{"zone"},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
				{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "no services",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
			nodes: labeledNodes,
			labels: []string{"zone"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
				{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "different services",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone0Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes: labeledNodes,
			labels: []string{"zone"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
				{Name: "machine21", Score: 0}, {Name: "machine22", Score: 0},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "three pods, one service pod",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes: labeledNodes,
			labels: []string{"zone"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: 50}, {Name: "machine12", Score: 50},
				{Name: "machine21", Score: 50}, {Name: "machine22", Score: 50},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "three pods, two service pods on different machines",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
			},
			nodes: labeledNodes,
			labels: []string{"zone"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: 0}, {Name: "machine12", Score: 0},
				{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "three service label match pods in different namespaces",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes: labeledNodes,
			labels: []string{"zone"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: 66}, {Name: "machine12", Score: 66},
				{Name: "machine21", Score: 33}, {Name: "machine22", Score: 33},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "four pods, three service pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes: labeledNodes,
			labels: []string{"zone"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: 33}, {Name: "machine12", Score: 33},
				{Name: "machine21", Score: 66}, {Name: "machine22", Score: 66},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "service with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone0Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes: labeledNodes,
			labels: []string{"zone"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: 75}, {Name: "machine12", Score: 75},
				{Name: "machine21", Score: 50}, {Name: "machine22", Score: 50},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "service pod on non-zoned node",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone0Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes: nodesWithZoneAndRackLabels,
			labels: []string{"zone", "rack"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine11", Score: 25}, {Name: "machine12", Score: 75},
				{Name: "machine21", Score: 25}, {Name: "machine22", Score: 25},
				{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
			name: "three pods, two service pods, with rack label",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			nodes := makeLabeledNodeList(test.nodes)
			snapshot := cache.NewSnapshot(test.pods, nodes)
			serviceLister := fakeframework.ServiceLister(test.services)

			p := &ServiceAffinity{
				sharedLister: snapshot,
				serviceLister: serviceLister,
				args: config.ServiceAffinityArgs{
					AntiAffinityLabelsPreference: test.labels,
				},
			}
			state := framework.NewCycleState()

			var gotList framework.NodeScoreList
			for _, n := range makeLabeledNodeList(test.nodes) {
				score, status := p.Score(context.Background(), state, test.pod, n.Name)
				if !status.IsSuccess() {
					t.Errorf("unexpected error: %v", status)
				}
				gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score})
			}

			status := p.ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList)
			if !status.IsSuccess() {
				t.Errorf("unexpected error: %v", status)
			}

			// sort the two lists to avoid failures on account of different ordering
			sortNodeScoreList(test.expectedList)
			sortNodeScoreList(gotList)
			if !reflect.DeepEqual(test.expectedList, gotList) {
				t.Errorf("expected %#v, got %#v", test.expectedList, gotList)
			}
		})
	}
}
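
// The expected scores above follow a spreading rule that can be inferred from
// the table: with t matching service pods in total and c of them in the
// candidate node's label group, the normalized score is
// MaxNodeScore*(t-c)/t (integer division), e.g. 100*(3-1)/3 = 66 and
// 100*(3-2)/3 = 33 in "four pods, three service pods". Nodes missing a
// preference label score 0, and with several labels (zone plus rack) the
// per-label scores are averaged, which yields the 25/75 split in the rack
// case. A sketch of the single-label normalization, inferred from the
// expected values rather than taken from the plugin source:
func normalizedSpreadScore(total, inGroup int64, hasLabel bool) int64 {
	if !hasLabel {
		// Nodes without the preference label never score.
		return 0
	}
	if total == 0 {
		// No service pods anywhere: every labeled node is equally good.
		return framework.MaxNodeScore
	}
	return framework.MaxNodeScore * (total - inGroup) / total
}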

func TestPreFilterStateAddRemovePod(t *testing.T) {
	var label1 = map[string]string{
		"region": "r1",
		"zone": "z11",
	}
	var label2 = map[string]string{
		"region": "r1",
		"zone": "z12",
	}
	var label3 = map[string]string{
		"region": "r2",
		"zone": "z21",
	}
	selector1 := map[string]string{"foo": "bar"}

	tests := []struct {
		name         string
		pendingPod   *v1.Pod
		addedPod     *v1.Pod
		existingPods []*v1.Pod
		nodes        []*v1.Node
		services     []*v1.Service
	}{
		{
			name: "no anti-affinity or service affinity exist",
			pendingPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
			},
			existingPods: []*v1.Pod{
				{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
					Spec: v1.PodSpec{NodeName: "nodeA"},
				},
				{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
					Spec: v1.PodSpec{NodeName: "nodeC"},
				},
			},
			addedPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "addedPod", Labels: selector1},
				Spec: v1.PodSpec{NodeName: "nodeB"},
			},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: label1}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: label3}},
			},
		},
		{
			name: "metadata service-affinity data are updated correctly after adding and removing a pod",
			pendingPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
			},
			existingPods: []*v1.Pod{
				{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
					Spec: v1.PodSpec{NodeName: "nodeA"},
				},
				{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
					Spec: v1.PodSpec{NodeName: "nodeC"},
				},
			},
			addedPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "addedPod", Labels: selector1},
				Spec: v1.PodSpec{NodeName: "nodeB"},
			},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector1}}},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: label1}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: label3}},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// getState builds the plugin and computes its preFilter state for the given list of pods.
			getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) {
				snapshot := cache.NewSnapshot(pods, test.nodes)

				p := &ServiceAffinity{
					sharedLister: snapshot,
					serviceLister: fakeframework.ServiceLister(test.services),
					args: config.ServiceAffinityArgs{
						AffinityLabels: []string{"region", "zone"},
					},
				}
				cycleState := framework.NewCycleState()
				preFilterStatus := p.PreFilter(context.Background(), cycleState, test.pendingPod)
				if !preFilterStatus.IsSuccess() {
					t.Errorf("prefilter failed with status: %v", preFilterStatus)
				}

				plState, err := getPreFilterState(cycleState)
				if err != nil {
					t.Errorf("failed to get metadata from cycleState: %v", err)
				}

				return p, cycleState, plState, snapshot
			}

			sortState := func(plState *preFilterState) *preFilterState {
				sort.SliceStable(plState.matchingPodList, func(i, j int) bool {
					return plState.matchingPodList[i].Name < plState.matchingPodList[j].Name
				})
				sort.SliceStable(plState.matchingPodServices, func(i, j int) bool {
					return plState.matchingPodServices[i].Name < plState.matchingPodServices[j].Name
				})
				return plState
			}

			// plStateAllPods is the state produced when all pods, including test.addedPod, are given to PreFilter.
			_, _, plStateAllPods, _ := getState(append(test.existingPods, test.addedPod))

			// plState is produced for test.existingPods (without test.addedPod).
			ipa, state, plState, snapshot := getState(test.existingPods)
			// Clone the state so that we can compare against it after RemovePod.
			plStateOriginal, _ := plState.Clone().(*preFilterState)

			// Add test.addedPod to plState and verify it is equal to plStateAllPods.
			nodeInfo := mustGetNodeInfo(t, snapshot, test.addedPod.Spec.NodeName)
			addedPodInfo := framework.NewPodInfo(test.addedPod)
			if err := ipa.AddPod(context.Background(), state, test.pendingPod, addedPodInfo, nodeInfo); err != nil {
				t.Errorf("error adding pod to preFilterState: %v", err)
			}

			if !reflect.DeepEqual(sortState(plStateAllPods), sortState(plState)) {
				t.Errorf("State is not equal, got: %v, want: %v", plState, plStateAllPods)
			}

			// Remove the added pod and make sure the state matches the original.
			if err := ipa.RemovePod(context.Background(), state, test.pendingPod, addedPodInfo, nodeInfo); err != nil {
				t.Errorf("error removing pod from preFilterState: %v", err)
			}
			if !reflect.DeepEqual(sortState(plStateOriginal), sortState(plState)) {
				t.Errorf("State is not equal, got: %v, want: %v", plState, plStateOriginal)
			}
		})
	}
}
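
// AddPod and RemovePod let the scheduler adjust the PreFilter state
// incrementally (for example while simulating preemption) instead of
// recomputing it; the test above requires the result to match a from-scratch
// PreFilter run. A minimal sketch of that update, assuming the pod has
// already been checked against the pending pod's service selectors
// (updateMatchingPods is a hypothetical helper, not the removed plugin's
// actual code):
func updateMatchingPods(s *preFilterState, pod *v1.Pod, add bool) {
	if add {
		s.matchingPodList = append(s.matchingPodList, pod)
		return
	}
	kept := s.matchingPodList[:0]
	for _, p := range s.matchingPodList {
		// Drop the removed pod; keep everything else in order.
		if p.Name != pod.Name || p.Namespace != pod.Namespace {
			kept = append(kept, p)
		}
	}
	s.matchingPodList = kept
}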

func TestPreFilterStateClone(t *testing.T) {
	source := &preFilterState{
		matchingPodList: []*v1.Pod{
			{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
			{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}},
		},
		matchingPodServices: []*v1.Service{
			{ObjectMeta: metav1.ObjectMeta{Name: "service1"}},
		},
	}

	clone := source.Clone()
	if clone == source {
		t.Errorf("Clone returned the exact same object!")
	}
	if !reflect.DeepEqual(clone, source) {
		t.Errorf("Copy is not equal to source!")
	}
}
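
// The Clone contract verified above only requires a deep-equal copy that is
// not the same object, so copying the two slices is enough. A sketch
// consistent with the test; cloneSketch is illustrative and not necessarily
// the plugin's exact Clone implementation:
func cloneSketch(s *preFilterState) *preFilterState {
	if s == nil {
		return nil
	}
	return &preFilterState{
		matchingPodList:     append([]*v1.Pod(nil), s.matchingPodList...),
		matchingPodServices: append([]*v1.Service(nil), s.matchingPodServices...),
	}
}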

func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node {
	nodes := make([]*v1.Node, 0, len(nodeMap))
	for nodeName, labels := range nodeMap {
		nodes = append(nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName, Labels: labels}})
	}
	return nodes
}

func sortNodeScoreList(out framework.NodeScoreList) {
	sort.Slice(out, func(i, j int) bool {
		if out[i].Score == out[j].Score {
			return out[i].Name < out[j].Name
		}
		return out[i].Score < out[j].Score
	})
}

func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *framework.NodeInfo {
	t.Helper()
	nodeInfo, err := snapshot.NodeInfos().Get(name)
	if err != nil {
		t.Fatal(err)
	}
	return nodeInfo
}

func TestPreFilterDisabled(t *testing.T) {
	pod := &v1.Pod{}
	nodeInfo := framework.NewNodeInfo()
	node := v1.Node{}
	nodeInfo.SetNode(&node)
	p := &ServiceAffinity{
		args: config.ServiceAffinityArgs{
			AffinityLabels: []string{"region"},
		},
	}
	cycleState := framework.NewCycleState()
	gotStatus := p.Filter(context.Background(), cycleState, pod, nodeInfo)
	wantStatus := framework.AsStatus(fmt.Errorf(`error reading "PreFilterServiceAffinity" from cycleState: %w`, framework.ErrNotFound))
	if !reflect.DeepEqual(gotStatus, wantStatus) {
		t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
	}
}

@ -72,9 +72,6 @@ type PriorityPolicy struct {
// PredicateArgument represents the arguments to configure predicate functions in scheduler policy configuration.
// Only one of its members may be specified
type PredicateArgument struct {
	// The predicate that provides affinity for pods belonging to a service
	// It uses a label to identify nodes that belong to the same "group"
	ServiceAffinity *ServiceAffinity `json:"serviceAffinity"`
	// The predicate that checks whether a particular node has a certain label
	// defined or not, regardless of value
	LabelsPresence *LabelsPresence `json:"labelsPresence"`
@ -83,9 +80,6 @@ type PredicateArgument struct {
// PriorityArgument represents the arguments to configure priority functions in scheduler policy configuration.
// Only one of its members may be specified
type PriorityArgument struct {
	// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
	// It uses a label to identify nodes that belong to the same "group"
	ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"`
	// The priority function that checks whether a particular node has a certain label
	// defined or not, regardless of value
	LabelPreference *LabelPreference `json:"labelPreference"`
@ -93,13 +87,6 @@ type PriorityArgument struct {
	RequestedToCapacityRatioArguments *RequestedToCapacityRatioArguments `json:"requestedToCapacityRatioArguments"`
}

// ServiceAffinity holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type ServiceAffinity struct {
	// The list of labels that identify node "groups"
	// All of the labels should match for the node to be considered a fit for hosting the pod
	Labels []string `json:"labels"`
}

// LabelsPresence holds the parameters that are used to configure the corresponding predicate in scheduler policy configuration.
type LabelsPresence struct {
	// The list of labels that identify node "groups"
@ -109,12 +96,6 @@ type LabelsPresence struct {
	Presence bool `json:"presence"`
}

// ServiceAntiAffinity holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct {
	// Used to identify node "groups"
	Label string `json:"label"`
}
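
// With the json tags above, a v1 Policy file set these types via keys like
// {"serviceAffinity": {"labels": ["region"]}} and
// {"serviceAntiAffinity": {"label": "zone"}}. An illustrative Go equivalent
// using the removed types; the label values here are examples, not defaults:
func examplePolicyArguments() (PredicateArgument, PriorityArgument) {
	pred := PredicateArgument{
		// Only schedule a service's pods onto nodes whose "region" value matches.
		ServiceAffinity: &ServiceAffinity{Labels: []string{"region"}},
	}
	prio := PriorityArgument{
		// Prefer spreading a service's pods across values of the "zone" label.
		ServiceAntiAffinity: &ServiceAntiAffinity{Label: "zone"},
	}
	return pred, prio
}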

// LabelPreference holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct {
	// Used to identify node "groups"

@ -184,11 +184,6 @@ func (in *Policy) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PredicateArgument) DeepCopyInto(out *PredicateArgument) {
	*out = *in
	if in.ServiceAffinity != nil {
		in, out := &in.ServiceAffinity, &out.ServiceAffinity
		*out = new(ServiceAffinity)
		(*in).DeepCopyInto(*out)
	}
	if in.LabelsPresence != nil {
		in, out := &in.LabelsPresence, &out.LabelsPresence
		*out = new(LabelsPresence)
@ -231,11 +226,6 @@ func (in *PredicatePolicy) DeepCopy() *PredicatePolicy {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PriorityArgument) DeepCopyInto(out *PriorityArgument) {
	*out = *in
	if in.ServiceAntiAffinity != nil {
		in, out := &in.ServiceAntiAffinity, &out.ServiceAntiAffinity
		*out = new(ServiceAntiAffinity)
		**out = **in
	}
	if in.LabelPreference != nil {
		in, out := &in.LabelPreference, &out.LabelPreference
		*out = new(LabelPreference)
@ -322,43 +312,6 @@ func (in *ResourceSpec) DeepCopy() *ResourceSpec {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAffinity) DeepCopyInto(out *ServiceAffinity) {
	*out = *in
	if in.Labels != nil {
		in, out := &in.Labels, &out.Labels
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAffinity.
func (in *ServiceAffinity) DeepCopy() *ServiceAffinity {
	if in == nil {
		return nil
	}
	out := new(ServiceAffinity)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceAntiAffinity) DeepCopyInto(out *ServiceAntiAffinity) {
	*out = *in
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAntiAffinity.
func (in *ServiceAntiAffinity) DeepCopy() *ServiceAntiAffinity {
	if in == nil {
		return nil
	}
	out := new(ServiceAntiAffinity)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) {
	*out = *in