mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-30 15:05:27 +00:00
Merge pull request #87725 from alculquicondor/rm_affinity_weight
Add defaults to pod affinity args
This commit is contained in:
commit 7cfa396e7c
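In short, the change below stops threading hardPodAffinityWeight through the algorithm provider registry and lets the InterPodAffinity plugin default and validate its own argument instead. A minimal, self-contained sketch of the resulting decode-then-default behavior, using hypothetical local types rather than the scheduler's (only the default value of 1 is taken from the diff):

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in for the plugin Args introduced below: a *int32, so an omitted
// field decodes to nil and the plugin itself can substitute the default.
type args struct {
	HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"`
}

const defaultHardPodAffinityWeight int32 = 1 // value taken from the diff

func weightFor(raw string) int32 {
	var a args
	if err := json.Unmarshal([]byte(raw), &a); err != nil {
		panic(err)
	}
	if a.HardPodAffinityWeight == nil {
		return defaultHardPodAffinityWeight
	}
	return *a.HardPodAffinityWeight
}

func main() {
	fmt.Println(weightFor(`{}`))                           // 1, the default
	fmt.Println(weightFor(`{"hardPodAffinityWeight":10}`)) // 10
}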
@@ -68,6 +68,7 @@ go_test(
"//pkg/scheduler/core:go_default_library",
"//pkg/scheduler/framework/plugins:go_default_library",
"//pkg/scheduler/framework/plugins/defaultbinder:go_default_library",
"//pkg/scheduler/framework/plugins/interpodaffinity:go_default_library",
"//pkg/scheduler/framework/plugins/nodelabel:go_default_library",
"//pkg/scheduler/framework/plugins/nodeports:go_default_library",
"//pkg/scheduler/framework/plugins/noderesources:go_default_library",
@@ -30,7 +30,6 @@ go_library(
"//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
"//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
"//pkg/scheduler/framework/plugins/volumezone:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
@@ -60,7 +59,6 @@ go_test(
"//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
"//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
"//pkg/scheduler/framework/plugins/volumezone:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
@@ -17,11 +17,9 @@ limitations under the License.
package algorithmprovider

import (
"fmt"
"sort"
"strings"

"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
@@ -48,21 +46,15 @@ import (
// ClusterAutoscalerProvider defines the default autoscaler provider
const ClusterAutoscalerProvider = "ClusterAutoscalerProvider"

// Config the configuration of an algorithm provider.
type Config struct {
FrameworkPlugins *schedulerapi.Plugins
FrameworkPluginConfig []schedulerapi.PluginConfig
}

// Registry is a collection of all available algorithm providers.
type Registry map[string]*Config
type Registry map[string]*schedulerapi.Plugins

// NewRegistry returns an algorithm provider registry instance.
func NewRegistry(hardPodAffinityWeight int64) Registry {
defaultConfig := getDefaultConfig(hardPodAffinityWeight)
func NewRegistry() Registry {
defaultConfig := getDefaultConfig()
applyFeatureGates(defaultConfig)

caConfig := getClusterAutoscalerConfig(hardPodAffinityWeight)
caConfig := getClusterAutoscalerConfig()
applyFeatureGates(caConfig)

return Registry{
@@ -73,7 +65,7 @@ func NewRegistry(hardPodAffinityWeight int64) Registry {

// ListAlgorithmProviders lists registered algorithm providers.
func ListAlgorithmProviders() string {
r := NewRegistry(1)
r := NewRegistry()
var providers []string
for k := range r {
providers = append(providers, k)
@@ -82,109 +74,94 @@ func ListAlgorithmProviders() string {
return strings.Join(providers, " | ")
}

func getDefaultConfig(hardPodAffinityWeight int64) *Config {
return &Config{
FrameworkPlugins: &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.LeastAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
func getDefaultConfig() *schedulerapi.Plugins {
return &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
FrameworkPluginConfig: []schedulerapi.PluginConfig{
{
Name: interpodaffinity.Name,
Args: runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, hardPodAffinityWeight))},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.LeastAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
},
}
}

func getClusterAutoscalerConfig(hardPodAffinityWeight int64) *Config {
defaultConfig := getDefaultConfig(hardPodAffinityWeight)
caConfig := Config{
FrameworkPlugins: &schedulerapi.Plugins{},
}
defaultConfig.FrameworkPlugins.DeepCopyInto(caConfig.FrameworkPlugins)
caConfig.FrameworkPluginConfig = append([]schedulerapi.PluginConfig(nil), defaultConfig.FrameworkPluginConfig...)

func getClusterAutoscalerConfig() *schedulerapi.Plugins {
caConfig := getDefaultConfig()
// Replace least with most requested.
for i := range caConfig.FrameworkPlugins.Score.Enabled {
if caConfig.FrameworkPlugins.Score.Enabled[i].Name == noderesources.LeastAllocatedName {
caConfig.FrameworkPlugins.Score.Enabled[i].Name = noderesources.MostAllocatedName
for i := range caConfig.Score.Enabled {
if caConfig.Score.Enabled[i].Name == noderesources.LeastAllocatedName {
caConfig.Score.Enabled[i].Name = noderesources.MostAllocatedName
}
}

return &caConfig
return caConfig
}

func applyFeatureGates(config *Config) {
func applyFeatureGates(config *schedulerapi.Plugins) {
// Only add EvenPodsSpread if the feature is enabled.
if utilfeature.DefaultFeatureGate.Enabled(features.EvenPodsSpread) {
klog.Infof("Registering EvenPodsSpread predicate and priority function")
f := schedulerapi.Plugin{Name: podtopologyspread.Name}
config.FrameworkPlugins.PreFilter.Enabled = append(config.FrameworkPlugins.PreFilter.Enabled, f)
config.FrameworkPlugins.Filter.Enabled = append(config.FrameworkPlugins.Filter.Enabled, f)
config.FrameworkPlugins.PostFilter.Enabled = append(config.FrameworkPlugins.PostFilter.Enabled, f)
config.PreFilter.Enabled = append(config.PreFilter.Enabled, f)
config.Filter.Enabled = append(config.Filter.Enabled, f)
config.PostFilter.Enabled = append(config.PostFilter.Enabled, f)
s := schedulerapi.Plugin{Name: podtopologyspread.Name, Weight: 1}
config.FrameworkPlugins.Score.Enabled = append(config.FrameworkPlugins.Score.Enabled, s)
config.Score.Enabled = append(config.Score.Enabled, s)
}

// Prioritizes nodes that satisfy pod's resource limits
if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
klog.Infof("Registering resourcelimits priority function")
s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName}
config.FrameworkPlugins.PostFilter.Enabled = append(config.FrameworkPlugins.PostFilter.Enabled, s)
config.PostFilter.Enabled = append(config.PostFilter.Enabled, s)
s = schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
config.FrameworkPlugins.Score.Enabled = append(config.FrameworkPlugins.Score.Enabled, s)
config.Score.Enabled = append(config.Score.Enabled, s)
}
}
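With the Config wrapper gone, a registry entry is the plugin set itself, so consumers index the map and read the plugin lists directly. A rough usage sketch, assuming this repository at this revision (the schedulerapi import path is the one used by the surrounding scheduler code):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
	// NewRegistry no longer takes a hardPodAffinityWeight; each entry is a
	// *schedulerapi.Plugins rather than a Config wrapper.
	r := algorithmprovider.NewRegistry()
	plugins, ok := r[schedulerapi.SchedulerDefaultProviderName]
	if !ok {
		panic("default algorithm provider is not registered")
	}
	for _, p := range plugins.Score.Enabled {
		fmt.Printf("score plugin %s (weight %d)\n", p.Name, p.Weight)
	}
}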
@@ -17,12 +17,10 @@ limitations under the License.
package algorithmprovider

import (
"fmt"
"testing"

"github.com/google/go-cmp/cmp"

"k8s.io/apimachinery/pkg/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
@@ -47,73 +45,64 @@ import (
)

func TestClusterAutoscalerProvider(t *testing.T) {
hardPodAffinityWeight := int64(1)
wantConfig := &Config{
FrameworkPlugins: &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.MostAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
wantConfig := &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
FrameworkPluginConfig: []schedulerapi.PluginConfig{
{
Name: interpodaffinity.Name,
Args: runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, hardPodAffinityWeight))},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.MostAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
},
}

r := NewRegistry(hardPodAffinityWeight)
r := NewRegistry()
gotConfig := r[ClusterAutoscalerProvider]
if diff := cmp.Diff(wantConfig, gotConfig); diff != "" {
t.Errorf("unexpected config diff (-want, +got): %s", diff)
@@ -121,76 +110,67 @@ func TestClusterAutoscalerProvider(t *testing.T) {
}

func TestApplyFeatureGates(t *testing.T) {
hardPodAffinityWeight := int64(1)
tests := []struct {
name string
featuresEnabled bool
wantConfig *Config
wantConfig *schedulerapi.Plugins
}{
{
name: "Feature gates disabled",
featuresEnabled: false,
wantConfig: &Config{
FrameworkPlugins: &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.LeastAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
wantConfig: &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
FrameworkPluginConfig: []schedulerapi.PluginConfig{
{
Name: interpodaffinity.Name,
Args: runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, hardPodAffinityWeight))},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.LeastAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
},
},
},
@@ -198,73 +178,65 @@ func TestApplyFeatureGates(t *testing.T) {
{
name: "Feature gates enabled",
featuresEnabled: true,
wantConfig: &Config{
FrameworkPlugins: &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
{Name: podtopologyspread.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
{Name: podtopologyspread.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
{Name: podtopologyspread.Name},
{Name: noderesources.ResourceLimitsName},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.LeastAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
{Name: podtopologyspread.Name, Weight: 1},
{Name: noderesources.ResourceLimitsName, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
wantConfig: &schedulerapi.Plugins{
QueueSort: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: queuesort.Name},
},
},
FrameworkPluginConfig: []schedulerapi.PluginConfig{
{
Name: interpodaffinity.Name,
Args: runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, hardPodAffinityWeight))},
PreFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.FitName},
{Name: nodeports.Name},
{Name: interpodaffinity.Name},
{Name: podtopologyspread.Name},
},
},
Filter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: nodeunschedulable.Name},
{Name: noderesources.FitName},
{Name: nodename.Name},
{Name: nodeports.Name},
{Name: nodeaffinity.Name},
{Name: volumerestrictions.Name},
{Name: tainttoleration.Name},
{Name: nodevolumelimits.EBSName},
{Name: nodevolumelimits.GCEPDName},
{Name: nodevolumelimits.CSIName},
{Name: nodevolumelimits.AzureDiskName},
{Name: volumebinding.Name},
{Name: volumezone.Name},
{Name: interpodaffinity.Name},
{Name: podtopologyspread.Name},
},
},
PostFilter: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: interpodaffinity.Name},
{Name: defaultpodtopologyspread.Name},
{Name: tainttoleration.Name},
{Name: podtopologyspread.Name},
{Name: noderesources.ResourceLimitsName},
},
},
Score: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: noderesources.BalancedAllocationName, Weight: 1},
{Name: imagelocality.Name, Weight: 1},
{Name: interpodaffinity.Name, Weight: 1},
{Name: noderesources.LeastAllocatedName, Weight: 1},
{Name: nodeaffinity.Name, Weight: 1},
{Name: nodepreferavoidpods.Name, Weight: 10000},
{Name: defaultpodtopologyspread.Name, Weight: 1},
{Name: tainttoleration.Name, Weight: 1},
{Name: podtopologyspread.Name, Weight: 1},
{Name: noderesources.ResourceLimitsName, Weight: 1},
},
},
Bind: &schedulerapi.PluginSet{
Enabled: []schedulerapi.Plugin{
{Name: defaultbinder.Name},
},
},
},
},
@@ -276,7 +248,7 @@ func TestApplyFeatureGates(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ResourceLimitsPriorityFunction, test.featuresEnabled)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, test.featuresEnabled)()

r := NewRegistry(hardPodAffinityWeight)
r := NewRegistry()
gotConfig := r[schedulerapi.SchedulerDefaultProviderName]
if diff := cmp.Diff(test.wantConfig, gotConfig); diff != "" {
t.Errorf("unexpected config diff (-want, +got): %s", diff)
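The feature-gate pattern exercised by these tests can be condensed as follows. This is a sketch that assumes this repository at this revision; the podtopologyspread import path mirrors the other plugin packages referenced above:

package algorithmprovider_test

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
)

func TestEvenPodsSpreadRegistrationSketch(t *testing.T) {
	// Flip the gate for the duration of the test, then rebuild the registry.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()

	plugins := algorithmprovider.NewRegistry()[schedulerapi.SchedulerDefaultProviderName]
	found := false
	for _, p := range plugins.PreFilter.Enabled {
		if p.Name == podtopologyspread.Name {
			found = true
		}
	}
	if !found {
		t.Errorf("expected %q among PreFilter plugins when EvenPodsSpread is enabled", podtopologyspread.Name)
	}
}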
@@ -25,8 +25,9 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
@@ -169,20 +170,17 @@ func (c *Configurator) create(extenders []core.SchedulerExtender) (*Scheduler, e
// createFromProvider creates a scheduler from the name of a registered algorithm provider.
func (c *Configurator) createFromProvider(providerName string) (*Scheduler, error) {
klog.V(2).Infof("Creating scheduler from algorithm provider '%v'", providerName)
r := algorithmprovider.NewRegistry(int64(c.hardPodAffinitySymmetricWeight))
provider, exist := r[providerName]
r := algorithmprovider.NewRegistry()
defaultPlugins, exist := r[providerName]
if !exist {
return nil, fmt.Errorf("algorithm provider %q is not registered", providerName)
}

// Combine the provided plugins with the ones from component config.
var defaultPlugins schedulerapi.Plugins
defaultPlugins.Append(provider.FrameworkPlugins)
defaultPlugins.Apply(c.plugins)
c.plugins = &defaultPlugins
c.plugins = defaultPlugins

var pluginConfig []schedulerapi.PluginConfig
pluginConfig = append(pluginConfig, provider.FrameworkPluginConfig...)
pluginConfig := []schedulerapi.PluginConfig{c.interPodAffinityPluginConfig()}
pluginConfig = append(pluginConfig, c.pluginConfig...)
c.pluginConfig = pluginConfig
@@ -268,12 +266,8 @@ func (c *Configurator) createFromConfig(policy schedulerapi.Policy) (*Scheduler,

klog.V(2).Infof("Creating scheduler with fit predicates '%v' and priority functions '%v'", predicateKeys, priorityKeys)

if c.hardPodAffinitySymmetricWeight < 1 || c.hardPodAffinitySymmetricWeight > 100 {
return nil, fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 1-100", c.hardPodAffinitySymmetricWeight)
}

args.InterPodAffinityArgs = &interpodaffinity.Args{
HardPodAffinityWeight: c.hardPodAffinitySymmetricWeight,
HardPodAffinityWeight: &c.hardPodAffinitySymmetricWeight,
}

pluginsForPredicates, pluginConfigForPredicates, err := getPredicateConfigs(predicateKeys, lr, args)
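The switch from int32 to *int32 for HardPodAffinityWeight is what makes the new defaulting possible: with a plain value field and omitempty, an explicit 0 is indistinguishable from an omitted field, while a pointer keeps that distinction. A standalone illustration with hypothetical local types:

package main

import (
	"encoding/json"
	"fmt"
)

type valueArgs struct {
	HardPodAffinityWeight int32 `json:"hardPodAffinityWeight,omitempty"`
}

type pointerArgs struct {
	HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"`
}

func main() {
	zero := int32(0)

	v, _ := json.Marshal(valueArgs{HardPodAffinityWeight: 0})
	p, _ := json.Marshal(pointerArgs{HardPodAffinityWeight: &zero})
	fmt.Println(string(v)) // {} : the explicit 0 is dropped by omitempty
	fmt.Println(string(p)) // {"hardPodAffinityWeight":0} : the 0 survives

	var decoded pointerArgs
	_ = json.Unmarshal([]byte(`{}`), &decoded)
	fmt.Println(decoded.HardPodAffinityWeight == nil) // true: the consumer can apply a default
}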
@@ -312,6 +306,15 @@ func (c *Configurator) createFromConfig(policy schedulerapi.Policy) (*Scheduler,
return c.create(extenders)
}

func (c *Configurator) interPodAffinityPluginConfig() schedulerapi.PluginConfig {
return schedulerapi.PluginConfig{
Name: interpodaffinity.Name,
Args: runtime.Unknown{
Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, c.hardPodAffinitySymmetricWeight)),
},
}
}

// getPriorityConfigs returns priorities configuration: ones that will run as priorities and ones that will run
// as framework plugins. Specifically, a priority will run as a framework plugin if a plugin config producer was
// registered for that priority.
@@ -436,7 +439,7 @@ func MakeDefaultErrorFunc(client clientset.Interface, podQueue internalqueue.Sch
// Retry asynchronously.
// Note that this is extremely rudimentary and we need a more real error handling path.
go func() {
defer runtime.HandleCrash()
defer utilruntime.HandleCrash()
podID := types.NamespacedName{
Namespace: pod.Namespace,
Name: pod.Name,
@@ -42,7 +42,9 @@ import (
extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -190,17 +192,29 @@ func TestCreateFromConfigWithHardPodAffinitySymmetricWeight(t *testing.T) {
"priorities" : [
{"name" : "RackSpread", "weight" : 3, "argument" : {"serviceAntiAffinity" : {"label" : "rack"}}},
{"name" : "NodeAffinityPriority", "weight" : 2},
{"name" : "ImageLocalityPriority", "weight" : 1}
{"name" : "ImageLocalityPriority", "weight" : 1},
{"name" : "InterPodAffinityPriority", "weight" : 1}
],
"hardPodAffinitySymmetricWeight" : 10
}`)
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), configData, &policy); err != nil {
t.Errorf("Invalid configuration: %v", err)
t.Fatalf("Invalid configuration: %v", err)
}
factory.createFromConfig(policy)
hpa := factory.hardPodAffinitySymmetricWeight
if hpa != 10 {
t.Errorf("Wrong hardPodAffinitySymmetricWeight, ecpected: %d, got: %d", 10, hpa)
// TODO(#87703): Verify that the entire pluginConfig is correct.
foundAffinityCfg := false
for _, cfg := range factory.pluginConfig {
if cfg.Name == interpodaffinity.Name {
foundAffinityCfg = true
wantArgs := runtime.Unknown{Raw: []byte(`{"hardPodAffinityWeight":10}`)}

if diff := cmp.Diff(wantArgs, cfg.Args); diff != "" {
t.Errorf("wrong InterPodAffinity args (-want, +got): %s", diff)
}
}
}
if !foundAffinityCfg {
t.Errorf("args for InterPodAffinity were not found")
}
}
@@ -219,6 +233,19 @@ func TestCreateFromEmptyConfig(t *testing.T) {
}

factory.createFromConfig(policy)
wantConfig := []schedulerapi.PluginConfig{
{
Name: noderesources.FitName,
Args: runtime.Unknown{Raw: []byte(`null`)},
},
{
Name: interpodaffinity.Name,
Args: runtime.Unknown{Raw: []byte(`{"hardPodAffinityWeight":1}`)},
},
}
if diff := cmp.Diff(wantConfig, factory.pluginConfig); diff != "" {
t.Errorf("wrong plugin config (-want, +got): %s", diff)
}
}

// Test configures a scheduler from a policy that does not specify any
@@ -25,12 +25,18 @@ import (
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
)

// Name is the name of the plugin used in the plugin registry and configurations.
const Name = "InterPodAffinity"
const (
// Name is the name of the plugin used in the plugin registry and configurations.
Name = "InterPodAffinity"

defaultHardPodAffinityWeight int32 = 1
minHardPodAffinityWeight int32 = 0
maxHardPodAffinityWeight int32 = 100
)

// Args holds the args that are used to configure the plugin.
type Args struct {
HardPodAffinityWeight int32 `json:"hardPodAffinityWeight,omitempty"`
HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"`
}

var _ framework.PreFilterPlugin = &InterPodAffinity{}
@@ -55,14 +61,31 @@ func New(plArgs *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin
if h.SnapshotSharedLister() == nil {
return nil, fmt.Errorf("SnapshotSharedlister is nil")
}

args := &Args{}
if err := framework.DecodeInto(plArgs, args); err != nil {
return nil, err
}

return &InterPodAffinity{
if err := validateArgs(args); err != nil {
return nil, err
}
pl := &InterPodAffinity{
sharedLister: h.SnapshotSharedLister(),
hardPodAffinityWeight: args.HardPodAffinityWeight,
}, nil
hardPodAffinityWeight: defaultHardPodAffinityWeight,
}
if args.HardPodAffinityWeight != nil {
pl.hardPodAffinityWeight = *args.HardPodAffinityWeight
}
return pl, nil
}

func validateArgs(args *Args) error {
if args.HardPodAffinityWeight == nil {
return nil
}

weight := *args.HardPodAffinityWeight
if weight < minHardPodAffinityWeight || weight > maxHardPodAffinityWeight {
return fmt.Errorf("invalid args.hardPodAffinityWeight: %d, must be in the range %d-%d", weight, minHardPodAffinityWeight, maxHardPodAffinityWeight)
}
return nil
}
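A standalone sketch of the bounds enforced by validateArgs above, using the 0 to 100 range from the new constants; the helper is a local stand-in, not the plugin's code:

package main

import "fmt"

const (
	minHardPodAffinityWeight int32 = 0
	maxHardPodAffinityWeight int32 = 100
)

// validate mirrors the range check: nil (unset) is accepted because the plugin
// substitutes its default; explicit values must fall within [0, 100].
func validate(w *int32) error {
	if w == nil {
		return nil
	}
	if *w < minHardPodAffinityWeight || *w > maxHardPodAffinityWeight {
		return fmt.Errorf("invalid hardPodAffinityWeight: %d, must be in the range %d-%d", *w, minHardPodAffinityWeight, maxHardPodAffinityWeight)
	}
	return nil
}

func ptr(v int32) *int32 { return &v }

func main() {
	cases := []struct {
		name   string
		weight *int32
	}{
		{"unset", nil},
		{"lower bound", ptr(0)},
		{"upper bound", ptr(100)},
		{"out of range", ptr(101)},
	}
	for _, c := range cases {
		fmt.Printf("%-12s -> %v\n", c.name, validate(c.weight))
	}
}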
@@ -627,7 +627,10 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))

args := &runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, test.hardPodAffinityWeight))}
p, _ := New(args, fh)
p, err := New(args, fh)
if err != nil {
t.Fatal(err)
}
status := p.(framework.PostFilterPlugin).PostFilter(context.Background(), state, test.pod, test.nodes, nil)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
@@ -52,7 +52,7 @@ type Fit struct {
type FitArgs struct {
// IgnoredResources is the list of resources that NodeResources fit filter
// should ignore.
IgnoredResources []string `json:"IgnoredResources,omitempty"`
IgnoredResources []string `json:"ignoredResources,omitempty"`
}

// preFilterState computed at PreFilter and used at Filter.
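The tag fix above mainly changes the marshaled key: encoding/json matches field names case-insensitively when decoding, so a lowercase key was already accepted, but the emitted and documented form is now camelCase like the other plugin args. A small local illustration (the resource names are made up):

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-in mirroring the corrected FitArgs tag.
type fitArgs struct {
	IgnoredResources []string `json:"ignoredResources,omitempty"`
}

func main() {
	out, _ := json.Marshal(fitArgs{IgnoredResources: []string{"example.com/foo"}})
	fmt.Println(string(out)) // {"ignoredResources":["example.com/foo"]}

	var in fitArgs
	_ = json.Unmarshal([]byte(`{"ignoredResources":["example.com/bar"]}`), &in)
	fmt.Println(in.IgnoredResources) // [example.com/bar]
}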
@@ -165,13 +165,12 @@ func TestPriorityQueue_Add(t *testing.T) {
}

func newDefaultFramework() framework.Framework {
defaultCfg := algorithmprovider.NewRegistry(1)[schedulerapi.SchedulerDefaultProviderName]
pl, pls := defaultCfg.FrameworkPlugins, defaultCfg.FrameworkPluginConfig
plugins := algorithmprovider.NewRegistry()[schedulerapi.SchedulerDefaultProviderName]
fakeClient := fake.NewSimpleClientset()
fwk, err := framework.NewFramework(
frameworkplugins.NewInTreeRegistry(),
pl,
pls,
plugins,
nil,
framework.WithClientSet(fakeClient),
framework.WithInformerFactory(informers.NewSharedInformerFactory(fakeClient, 0)),
framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()),
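The wiring in this test can be reproduced outside of it roughly as follows. This is a sketch that assumes this repository at this revision; the algorithmprovider and internal cache import paths are assumptions, and error handling is minimal:

package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
	frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

func main() {
	// Look up the default provider's plugin set; there is no weight parameter anymore.
	plugins := algorithmprovider.NewRegistry()[schedulerapi.SchedulerDefaultProviderName]

	client := fake.NewSimpleClientset()
	fwk, err := framework.NewFramework(
		frameworkplugins.NewInTreeRegistry(),
		plugins,
		nil, // no plugin config: InterPodAffinity falls back to its default hardPodAffinityWeight of 1
		framework.WithClientSet(client),
		framework.WithInformerFactory(informers.NewSharedInformerFactory(client, 0)),
		framework.WithSnapshotSharedLister(internalcache.NewEmptySnapshot()),
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("framework constructed: %T\n", fwk)
}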