Move Plugin Args defaults to versioned packages
Signed-off-by: Aldo Culquicondor <acondor@google.com>
Commit 2935480cc8 (parent ffec75da81)
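In short: instead of each plugin hard-coding its own fallback values, plugin argument defaults are now registered with the versioned component-config API (k8s.io/kube-scheduler/config/v1alpha2) and applied through the scheduler's config scheme, and the framework falls back to those defaults whenever a profile has no explicit PluginConfig entry for a plugin. A minimal sketch of the resulting behavior, assuming the scheduler's config scheme package exposes the usual `Scheme` variable with the v1alpha2 types and defaulting functions registered:

```go
package main

import (
	"fmt"

	"k8s.io/kube-scheduler/config/v1alpha2"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
)

func main() {
	// An args object as it looks when the user omits the field entirely.
	args := &v1alpha2.InterPodAffinityArgs{}

	// Run the registered v1alpha2 defaulting functions; after this commit,
	// SetDefaults_InterPodAffinityArgs fills HardPodAffinityWeight when nil.
	scheme.Scheme.Default(args)

	fmt.Println(*args.HardPodAffinityWeight) // expected: 1
}
```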
@@ -71,6 +71,7 @@ go_test(
     deps = [
         "//cmd/kube-scheduler/app/config:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
+        "//pkg/scheduler/framework/plugins/interpodaffinity:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
@@ -91,6 +91,8 @@ func (o *DeprecatedOptions) Validate() []error {
 // 1. --use-legacy-policy-config to use a policy file.
 // 2. --policy-configmap to use a policy config map value.
 // 3. --algorithm-provider to use a named algorithm provider.
+//
+// This function is only called when no config file is provided.
 func (o *DeprecatedOptions) ApplyTo(cfg *kubeschedulerconfig.KubeSchedulerConfiguration) error {
 	if o == nil {
 		return nil
@@ -120,20 +122,19 @@ func (o *DeprecatedOptions) ApplyTo(cfg *kubeschedulerconfig.KubeSchedulerConfig
 		}
 	}
 
-	// The following deprecated options affect the only existing profile that is
-	// added by default.
+	// Deprecated flags have an effect iff no config file was provided, in which
+	// case this function expects a default KubeSchedulerConfiguration instance,
+	// which has a single profile.
 	profile := &cfg.Profiles[0]
 	if len(o.SchedulerName) > 0 {
 		profile.SchedulerName = o.SchedulerName
 	}
-	if o.HardPodAffinitySymmetricWeight != interpodaffinity.DefaultHardPodAffinityWeight {
-		plCfg := kubeschedulerconfig.PluginConfig{
-			Name: interpodaffinity.Name,
-			Args: &kubeschedulerconfig.InterPodAffinityArgs{
-				HardPodAffinityWeight: o.HardPodAffinitySymmetricWeight,
-			},
-		}
-		profile.PluginConfig = append(profile.PluginConfig, plCfg)
-	}
+	plCfg := kubeschedulerconfig.PluginConfig{
+		Name: interpodaffinity.Name,
+		Args: &kubeschedulerconfig.InterPodAffinityArgs{
+			HardPodAffinityWeight: o.HardPodAffinitySymmetricWeight,
+		},
+	}
+	profile.PluginConfig = append(profile.PluginConfig, plCfg)
 	return nil
 }
@@ -49,7 +49,6 @@ import (
 	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
-	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
 )
 
 // Options has all the params needed to run a Scheduler
@@ -104,7 +103,7 @@ func NewOptions() (*Options, error) {
 			UseLegacyPolicyConfig:          false,
 			PolicyConfigMapNamespace:       metav1.NamespaceSystem,
 			SchedulerName:                  corev1.DefaultSchedulerName,
-			HardPodAffinitySymmetricWeight: interpodaffinity.DefaultHardPodAffinityWeight,
+			HardPodAffinitySymmetricWeight: 1,
 		},
 		Metrics: metrics.NewOptions(),
 	}
@@ -29,6 +29,7 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 	"github.com/stretchr/testify/assert"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -728,7 +729,17 @@ profiles:
 			PodInitialBackoffSeconds: defaultPodInitialBackoffSeconds,
 			PodMaxBackoffSeconds:     defaultPodMaxBackoffSeconds,
 			Profiles: []kubeschedulerconfig.KubeSchedulerProfile{
-				{SchedulerName: "my-nice-scheduler"},
+				{
+					SchedulerName: "my-nice-scheduler",
+					PluginConfig: []kubeschedulerconfig.PluginConfig{
+						{
+							Name: interpodaffinity.Name,
+							Args: &kubeschedulerconfig.InterPodAffinityArgs{
+								HardPodAffinityWeight: 1,
+							},
+						},
+					},
+				},
 			},
 		},
 	},
@@ -233,8 +233,9 @@ profiles:
 			PluginConfig: []config.PluginConfig{
 				{
 					Name: "InterPodAffinity",
-					// TODO(acondor): Set default values.
-					Args: &config.InterPodAffinityArgs{},
+					Args: &config.InterPodAffinityArgs{
+						HardPodAffinityWeight: 1,
+					},
 				},
 				{
 					Name: "NodeResourcesFit",
@@ -39,6 +39,7 @@ go_test(
         "//pkg/scheduler/apis/config:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/component-base/config/v1alpha1:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
@@ -156,3 +156,12 @@ func SetDefaults_KubeSchedulerConfiguration(obj *v1alpha2.KubeSchedulerConfigura
 		obj.EnableContentionProfiling = &enableContentionProfiling
 	}
 }
+
+func SetDefaults_InterPodAffinityArgs(obj *v1alpha2.InterPodAffinityArgs) {
+	// Note that an object is created manually in cmd/kube-scheduler/app/options/deprecated.go
+	// DeprecatedOptions#ApplyTo.
+	// Update that object if a new default field is added here.
+	if obj.HardPodAffinityWeight == nil {
+		obj.HardPodAffinityWeight = pointer.Int32Ptr(1)
+	}
+}
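The versioned field being defaulted here is a pointer, which is what lets the defaulter distinguish "not set" from an explicit 0 (the minimum allowed weight), as the test below exercises. A sketch of the assumed shape of the v1alpha2 type (inferred from the pointer.Int32Ptr call above, not copied from this diff):

```go
package v1alpha2

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// InterPodAffinityArgs holds the arguments for the InterPodAffinity plugin
// (assumed shape, for illustration only).
type InterPodAffinityArgs struct {
	metav1.TypeMeta `json:",inline"`

	// HardPodAffinityWeight is the score weight applied for existing pods'
	// hard pod-affinity terms. A nil value means "not set", which the
	// defaulter above turns into 1; an explicit 0 is preserved.
	HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"`
}
```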
@@ -22,6 +22,8 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	componentbaseconfig "k8s.io/component-base/config/v1alpha1"
 	"k8s.io/kube-scheduler/config/v1alpha2"
 	"k8s.io/utils/pointer"
@@ -275,3 +277,47 @@ func TestSchedulerDefaults(t *testing.T) {
 		})
 	}
 }
+
+func TestPluginArgsDefaults(t *testing.T) {
+	tests := []struct {
+		name string
+		in   runtime.Object
+		want runtime.Object
+	}{
+		{
+			name: "InterPodAffinityArgs empty",
+			in:   &v1alpha2.InterPodAffinityArgs{},
+			want: &v1alpha2.InterPodAffinityArgs{
+				HardPodAffinityWeight: pointer.Int32Ptr(1),
+			},
+		},
+		{
+			name: "InterPodAffinityArgs explicit 0",
+			in: &v1alpha2.InterPodAffinityArgs{
+				HardPodAffinityWeight: pointer.Int32Ptr(0),
+			},
+			want: &v1alpha2.InterPodAffinityArgs{
+				HardPodAffinityWeight: pointer.Int32Ptr(0),
+			},
+		},
+		{
+			name: "InterPodAffinityArgs with value",
+			in: &v1alpha2.InterPodAffinityArgs{
+				HardPodAffinityWeight: pointer.Int32Ptr(5),
+			},
+			want: &v1alpha2.InterPodAffinityArgs{
+				HardPodAffinityWeight: pointer.Int32Ptr(5),
+			},
+		},
+	}
+	for _, tc := range tests {
+		scheme := runtime.NewScheme()
+		utilruntime.Must(AddToScheme(scheme))
+		t.Run(tc.name, func(t *testing.T) {
+			scheme.Default(tc.in)
+			if diff := cmp.Diff(tc.in, tc.want); diff != "" {
+				t.Errorf("Got unexpected defaults (-want, +got):\n%s", diff)
+			}
+		})
+	}
+}
@@ -29,12 +29,17 @@ import (
 // Public to allow building arbitrary schemes.
 // All generated defaulters are covering - they call all nested defaulters.
 func RegisterDefaults(scheme *runtime.Scheme) error {
+	scheme.AddTypeDefaultingFunc(&v1alpha2.InterPodAffinityArgs{}, func(obj interface{}) { SetObjectDefaults_InterPodAffinityArgs(obj.(*v1alpha2.InterPodAffinityArgs)) })
 	scheme.AddTypeDefaultingFunc(&v1alpha2.KubeSchedulerConfiguration{}, func(obj interface{}) {
 		SetObjectDefaults_KubeSchedulerConfiguration(obj.(*v1alpha2.KubeSchedulerConfiguration))
 	})
 	return nil
 }
 
+func SetObjectDefaults_InterPodAffinityArgs(in *v1alpha2.InterPodAffinityArgs) {
+	SetDefaults_InterPodAffinityArgs(in)
+}
+
 func SetObjectDefaults_KubeSchedulerConfiguration(in *v1alpha2.KubeSchedulerConfiguration) {
 	SetDefaults_KubeSchedulerConfiguration(in)
 }
@@ -323,16 +323,12 @@ func (c *Configurator) createFromConfig(policy schedulerapi.Policy) (*Scheduler,
 	}
 	for i := range c.profiles {
 		prof := &c.profiles[i]
-		if prof.Plugins != nil {
-			return nil, errors.New("using Plugins and Policy simultaneously is not supported")
-		}
+		// Plugins are empty when using Policy.
 		prof.Plugins = &schedulerapi.Plugins{}
 		prof.Plugins.Append(&defPlugins)
 
-		if len(prof.PluginConfig) != 0 {
-			return nil, errors.New("using PluginConfig and Policy simultaneously is not supported")
-		}
-		prof.PluginConfig = append(prof.PluginConfig, defPluginConfig...)
+		// PluginConfig is ignored when using Policy.
+		prof.PluginConfig = defPluginConfig
 	}
 
 	return c.create()
@@ -20,7 +20,6 @@ import (
 	"context"
 	"errors"
 	"reflect"
-	"strings"
 	"testing"
 	"time"
 
@@ -96,87 +95,48 @@ func TestCreateFromConfig(t *testing.T) {
 		{"name" : "NodeAffinityPriority", "weight" : 2},
 		{"name" : "ImageLocalityPriority", "weight" : 1} ]
 	}`)
-	cases := []struct {
-		name       string
-		plugins    *schedulerapi.Plugins
-		pluginCfgs []schedulerapi.PluginConfig
-		wantErr    string
-	}{
-		{
-			name: "just policy",
-		},
-		{
-			name: "policy and plugins",
-			plugins: &schedulerapi.Plugins{
-				Filter: &schedulerapi.PluginSet{
-					Disabled: []schedulerapi.Plugin{{Name: nodelabel.Name}},
-				},
-			},
-			wantErr: "using Plugins and Policy simultaneously is not supported",
-		},
-		{
-			name: "policy and plugin config",
-			pluginCfgs: []schedulerapi.PluginConfig{
-				{Name: queuesort.Name},
-			},
-			wantErr: "using PluginConfig and Policy simultaneously is not supported",
-		},
-	}
-	for _, tc := range cases {
-		t.Run(tc.name, func(t *testing.T) {
-			client := fake.NewSimpleClientset()
-			stopCh := make(chan struct{})
-			defer close(stopCh)
-			factory := newConfigFactory(client, stopCh)
-			factory.profiles[0].Plugins = tc.plugins
-			factory.profiles[0].PluginConfig = tc.pluginCfgs
-
-			var policy schedulerapi.Policy
-			if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), configData, &policy); err != nil {
-				t.Errorf("Invalid configuration: %v", err)
-			}
-
-			sched, err := factory.createFromConfig(policy)
-			if tc.wantErr != "" {
-				if err == nil || !strings.Contains(err.Error(), tc.wantErr) {
-					t.Errorf("got err %q, want %q", err, tc.wantErr)
-				}
-				return
-			}
-			if err != nil {
-				t.Fatalf("createFromConfig failed: %v", err)
-			}
-			// createFromConfig is the old codepath where we only have one profile.
-			prof := sched.Profiles[testSchedulerName]
-			queueSortPls := prof.ListPlugins()["QueueSortPlugin"]
-			wantQueuePls := []schedulerapi.Plugin{{Name: queuesort.Name}}
-			if diff := cmp.Diff(wantQueuePls, queueSortPls); diff != "" {
-				t.Errorf("Unexpected QueueSort plugins (-want, +got): %s", diff)
-			}
-			bindPls := prof.ListPlugins()["BindPlugin"]
-			wantBindPls := []schedulerapi.Plugin{{Name: defaultbinder.Name}}
-			if diff := cmp.Diff(wantBindPls, bindPls); diff != "" {
-				t.Errorf("Unexpected Bind plugins (-want, +got): %s", diff)
-			}
-
-			// Verify that node label predicate/priority are converted to framework plugins.
-			var wantArgs runtime.Object = &schedulerapi.NodeLabelArgs{
-				PresentLabels:           []string{"zone"},
-				AbsentLabels:            []string{"foo"},
-				PresentLabelsPreference: []string{"l1"},
-				AbsentLabelsPreference:  []string{"l2"},
-			}
-			verifyPluginConvertion(t, nodelabel.Name, []string{"FilterPlugin", "ScorePlugin"}, prof, &factory.profiles[0], 6, wantArgs)
-			// Verify that service affinity custom predicate/priority is converted to framework plugin.
-			wantArgs = &schedulerapi.ServiceAffinityArgs{
-				AffinityLabels:               []string{"zone", "foo"},
-				AntiAffinityLabelsPreference: []string{"rack", "zone"},
-			}
-			verifyPluginConvertion(t, serviceaffinity.Name, []string{"FilterPlugin", "ScorePlugin"}, prof, &factory.profiles[0], 6, wantArgs)
-			// TODO(#87703): Verify all plugin configs.
-		})
-	}
+	client := fake.NewSimpleClientset()
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	factory := newConfigFactory(client, stopCh)
+
+	var policy schedulerapi.Policy
+	if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), configData, &policy); err != nil {
+		t.Errorf("Invalid configuration: %v", err)
+	}
+
+	sched, err := factory.createFromConfig(policy)
+	if err != nil {
+		t.Fatalf("createFromConfig failed: %v", err)
+	}
+	// createFromConfig is the old codepath where we only have one profile.
+	prof := sched.Profiles[testSchedulerName]
+	queueSortPls := prof.ListPlugins()["QueueSortPlugin"]
+	wantQueuePls := []schedulerapi.Plugin{{Name: queuesort.Name}}
+	if diff := cmp.Diff(wantQueuePls, queueSortPls); diff != "" {
+		t.Errorf("Unexpected QueueSort plugins (-want, +got): %s", diff)
+	}
+	bindPls := prof.ListPlugins()["BindPlugin"]
+	wantBindPls := []schedulerapi.Plugin{{Name: defaultbinder.Name}}
+	if diff := cmp.Diff(wantBindPls, bindPls); diff != "" {
+		t.Errorf("Unexpected Bind plugins (-want, +got): %s", diff)
+	}
+
+	// Verify that node label predicate/priority are converted to framework plugins.
+	var wantArgs runtime.Object = &schedulerapi.NodeLabelArgs{
+		PresentLabels:           []string{"zone"},
+		AbsentLabels:            []string{"foo"},
+		PresentLabelsPreference: []string{"l1"},
+		AbsentLabelsPreference:  []string{"l2"},
+	}
+	verifyPluginConvertion(t, nodelabel.Name, []string{"FilterPlugin", "ScorePlugin"}, prof, &factory.profiles[0], 6, wantArgs)
+	// Verify that service affinity custom predicate/priority is converted to framework plugin.
+	wantArgs = &schedulerapi.ServiceAffinityArgs{
+		AffinityLabels:               []string{"zone", "foo"},
+		AntiAffinityLabelsPreference: []string{"rack", "zone"},
+	}
+	verifyPluginConvertion(t, serviceaffinity.Name, []string{"FilterPlugin", "ScorePlugin"}, prof, &factory.profiles[0], 6, wantArgs)
+	// TODO(#87703): Verify all plugin configs.
 }
 
 func verifyPluginConvertion(t *testing.T, name string, extensionPoints []string, prof *profile.Profile, cfg *schedulerapi.KubeSchedulerProfile, wantWeight int32, wantArgs runtime.Object) {
@@ -30,8 +30,6 @@ const (
 	// Name is the name of the plugin used in the plugin registry and configurations.
 	Name = "InterPodAffinity"
 
-	// DefaultHardPodAffinityWeight is the default HardPodAffinityWeight.
-	DefaultHardPodAffinityWeight int32 = 1
 	// MinHardPodAffinityWeight is the minimum HardPodAffinityWeight.
 	MinHardPodAffinityWeight int32 = 0
 	// MaxHardPodAffinityWeight is the maximum HardPodAffinityWeight.
@@ -79,11 +77,6 @@ func New(plArgs runtime.Object, h framework.FrameworkHandle) (framework.Plugin,
 }
 
 func getArgs(obj runtime.Object) (config.InterPodAffinityArgs, error) {
-	if obj == nil {
-		return config.InterPodAffinityArgs{
-			HardPodAffinityWeight: DefaultHardPodAffinityWeight,
-		}, nil
-	}
 	ptr, ok := obj.(*config.InterPodAffinityArgs)
 	if !ok {
 		return config.InterPodAffinityArgs{}, fmt.Errorf("want args to be of type InterPodAffinityArgs, got %T", obj)
@@ -519,7 +519,7 @@ func TestPreferredAffinity(t *testing.T) {
 			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			p := &InterPodAffinity{
 				args: config.InterPodAffinityArgs{
-					HardPodAffinityWeight: DefaultHardPodAffinityWeight,
+					HardPodAffinityWeight: 1,
 				},
 				sharedLister: snapshot,
 			}
@@ -68,9 +68,6 @@ func New(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plu
 }
 
 func getArgs(obj runtime.Object) (config.NodeLabelArgs, error) {
-	if obj == nil {
-		return config.NodeLabelArgs{}, nil
-	}
 	ptr, ok := obj.(*config.NodeLabelArgs)
 	if !ok {
 		return config.NodeLabelArgs{}, fmt.Errorf("want args to be of type NodeLabelArgs, got %T", obj)
@@ -318,7 +318,7 @@ func TestNodeLabelFilterWithoutNode(t *testing.T) {
 	var pod *v1.Pod
 	t.Run("node does not exist", func(t *testing.T) {
 		nodeInfo := framework.NewNodeInfo()
-		p, err := New(nil, nil)
+		p, err := New(&config.NodeLabelArgs{}, nil)
 		if err != nil {
 			t.Fatalf("Failed to create plugin: %v", err)
 		}
@@ -332,7 +332,7 @@ func TestNodeLabelFilterWithoutNode(t *testing.T) {
 func TestNodeLabelScoreWithoutNode(t *testing.T) {
 	t.Run("node does not exist", func(t *testing.T) {
 		fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()))
-		p, err := New(nil, fh)
+		p, err := New(&config.NodeLabelArgs{}, fh)
 		if err != nil {
 			t.Fatalf("Failed to create plugin: %+v", err)
 		}
@@ -75,9 +75,6 @@ func NewFit(plArgs runtime.Object, _ framework.FrameworkHandle) (framework.Plugi
 }
 
 func getFitArgs(obj runtime.Object) (config.NodeResourcesFitArgs, error) {
-	if obj == nil {
-		return config.NodeResourcesFitArgs{}, nil
-	}
 	ptr, ok := obj.(*config.NodeResourcesFitArgs)
 	if !ok {
 		return config.NodeResourcesFitArgs{}, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", obj)
@@ -402,7 +402,10 @@ func TestPreFilterDisabled(t *testing.T) {
 		nodeInfo := framework.NewNodeInfo()
 		node := v1.Node{}
 		nodeInfo.SetNode(&node)
-		p, _ := NewFit(nil, nil)
+		p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+		if err != nil {
+			t.Fatal(err)
+		}
 		cycleState := framework.NewCycleState()
 		gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, pod, nodeInfo)
 		wantStatus := framework.NewStatus(framework.Error, `error reading "PreFilterNodeResourcesFit" from cycleState: not found`)
@@ -449,7 +452,10 @@ func TestNotEnoughRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, _ := NewFit(nil, nil)
+			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+			if err != nil {
+				t.Fatal(err)
+			}
 			cycleState := framework.NewCycleState()
 			preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
 			if !preFilterStatus.IsSuccess() {
@@ -505,7 +511,10 @@ func TestStorageRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, _ := NewFit(nil, nil)
+			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+			if err != nil {
+				t.Fatal(err)
+			}
 			cycleState := framework.NewCycleState()
 			preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
 			if !preFilterStatus.IsSuccess() {
@@ -92,9 +92,6 @@ func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Framewo
 }
 
 func getRequestedToCapacityRatioArgs(obj runtime.Object) (config.RequestedToCapacityRatioArgs, error) {
-	if obj == nil {
-		return config.RequestedToCapacityRatioArgs{}, nil
-	}
 	ptr, ok := obj.(*config.RequestedToCapacityRatioArgs)
 	if !ok {
 		return config.RequestedToCapacityRatioArgs{}, fmt.Errorf("want args to be of type RequestedToCapacityRatioArgs, got %T", obj)
@@ -96,9 +96,6 @@ func New(plArgs runtime.Object, h framework.FrameworkHandle) (framework.Plugin,
 }
 
 func getArgs(obj runtime.Object) (config.PodTopologySpreadArgs, error) {
-	if obj == nil {
-		return config.PodTopologySpreadArgs{}, nil
-	}
 	ptr, ok := obj.(*config.PodTopologySpreadArgs)
 	if !ok {
 		return config.PodTopologySpreadArgs{}, fmt.Errorf("want args to be of type PodTopologySpreadArgs, got %T", obj)
@@ -78,9 +78,6 @@ func New(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plu
 }
 
 func getArgs(obj runtime.Object) (config.ServiceAffinityArgs, error) {
-	if obj == nil {
-		return config.ServiceAffinityArgs{}, nil
-	}
 	ptr, ok := obj.(*config.ServiceAffinityArgs)
 	if !ok {
 		return config.ServiceAffinityArgs{}, fmt.Errorf("want args to be of type ServiceAffinityArgs, got %T", obj)
@@ -19,6 +19,7 @@ go_library(
         "//pkg/controller/volume/scheduling:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
+        "//pkg/scheduler/apis/config/scheme:go_default_library",
        "//pkg/scheduler/internal/parallelize:go_default_library",
        "//pkg/scheduler/metrics:go_default_library",
        "//pkg/scheduler/util:go_default_library",
@@ -32,6 +33,7 @@ go_library(
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/component-base/metrics:go_default_library",
+        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/sigs.k8s.io/yaml:go_default_library",
     ],
@@ -72,6 +74,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
        "//vendor/github.com/prometheus/client_model/go:go_default_library",
     ],
@@ -29,8 +29,10 @@ import (
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/klog"
+	"k8s.io/kube-scheduler/config/v1alpha2"
 	"k8s.io/kubernetes/pkg/controller/volume/scheduling"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
+	"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
 	"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 )
@@ -54,6 +56,8 @@ const (
 	permit = "Permit"
 )
 
+var configDecoder = scheme.Codecs.UniversalDecoder()
+
 // framework is the component responsible for initializing and running scheduler
 // plugins.
 type framework struct {
@@ -197,7 +201,7 @@ func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfi
 	// get needed plugins from config
 	pg := f.pluginsNeeded(plugins)
 
-	pluginConfig := make(map[string]runtime.Object, 0)
+	pluginConfig := make(map[string]runtime.Object, len(args))
 	for i := range args {
 		name := args[i].Name
 		if _, ok := pluginConfig[name]; ok {
@@ -214,7 +218,11 @@ func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfi
 			continue
 		}
 
-		p, err := factory(pluginConfig[name], f)
+		args, err := getPluginArgsOrDefault(pluginConfig, name)
+		if err != nil {
+			return nil, fmt.Errorf("getting args for Plugin %q: %w", name, err)
+		}
+		p, err := factory(args, f)
 		if err != nil {
 			return nil, fmt.Errorf("error initializing plugin %q: %v", name, err)
 		}
@@ -260,6 +268,25 @@ func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfi
 	return f, nil
 }
 
+// getPluginArgsOrDefault returns a configuration provided by the user or builds
+// a default from the scheme. Returns `nil, nil` if the plugin does not have a
+// defined arg types, such as in-tree plugins that don't require configuration
+// or out-of-tree plugins.
+func getPluginArgsOrDefault(pluginConfig map[string]runtime.Object, name string) (runtime.Object, error) {
+	res, ok := pluginConfig[name]
+	if ok {
+		return res, nil
+	}
+	// Use defaults from latest config API version.
+	gvk := v1alpha2.SchemeGroupVersion.WithKind(name + "Args")
+	obj, _, err := configDecoder.Decode(nil, &gvk, nil)
+	if runtime.IsNotRegisteredError(err) {
+		// This plugin is out-of-tree or doesn't require configuration.
+		return nil, nil
+	}
+	return obj, err
+}
+
 func updatePluginList(pluginList interface{}, pluginSet *config.PluginSet, pluginsMap map[string]Plugin) error {
 	if pluginSet == nil {
 		return nil
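The fallback path above leans on the config codec: decoding nil bytes with only a GroupVersionKind produces a freshly constructed versioned object, the codec runs the registered v1alpha2 defaulters on it, and the result is converted to the internal config type before it reaches the plugin factory. A rough, illustrative sketch of that behavior for one hard-coded plugin name (the helper name and standalone wiring are assumptions, not part of this commit):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kube-scheduler/config/v1alpha2"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
)

// defaultedInterPodAffinityArgs mirrors the fallback in getPluginArgsOrDefault
// for a single plugin name; it is a sketch, not scheduler API.
func defaultedInterPodAffinityArgs() (runtime.Object, error) {
	gvk := v1alpha2.SchemeGroupVersion.WithKind("InterPodAffinityArgs")
	obj, _, err := scheme.Codecs.UniversalDecoder().Decode(nil, &gvk, nil)
	if runtime.IsNotRegisteredError(err) {
		// Out-of-tree plugin or no registered Args type: the framework
		// passes nil args to the plugin factory in this case.
		return nil, nil
	}
	return obj, err
}

func main() {
	obj, err := defaultedInterPodAffinityArgs()
	if err != nil {
		panic(err)
	}
	// Expected: the internal *config.InterPodAffinityArgs with the versioned
	// default (HardPodAffinityWeight: 1) already applied.
	fmt.Printf("%#v\n", obj.(*config.InterPodAffinityArgs))
}
```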
@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"
 
+	"github.com/google/go-cmp/cmp"
 	"github.com/prometheus/client_golang/prometheus"
 	dto "github.com/prometheus/client_model/go"
 	v1 "k8s.io/api/core/v1"
@@ -133,21 +134,21 @@ type TestPlugin struct {
 	inj  injectedResult
 }
 
-type TestPluginPreFilterExtension struct {
-	inj injectedResult
-}
-
-func (e *TestPluginPreFilterExtension) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status {
-	return NewStatus(Code(e.inj.PreFilterAddPodStatus), "injected status")
-}
-func (e *TestPluginPreFilterExtension) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status {
-	return NewStatus(Code(e.inj.PreFilterRemovePodStatus), "injected status")
+func (pl *TestPlugin) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status {
+	return NewStatus(Code(pl.inj.PreFilterAddPodStatus), "injected status")
+}
+
+func (pl *TestPlugin) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status {
+	return NewStatus(Code(pl.inj.PreFilterRemovePodStatus), "injected status")
 }
 
 func (pl *TestPlugin) Name() string {
 	return pl.name
 }
 
 func (pl *TestPlugin) Less(*QueuedPodInfo, *QueuedPodInfo) bool {
 	return false
 }
 
 func (pl *TestPlugin) Score(ctx context.Context, state *CycleState, p *v1.Pod, nodeName string) (int64, *Status) {
 	return 0, NewStatus(Code(pl.inj.ScoreStatus), "injected status")
 }
@@ -161,7 +162,7 @@ func (pl *TestPlugin) PreFilter(ctx context.Context, state *CycleState, p *v1.Po
 }
 
 func (pl *TestPlugin) PreFilterExtensions() PreFilterExtensions {
-	return &TestPluginPreFilterExtension{inj: pl.inj}
+	return pl
 }
 
 func (pl *TestPlugin) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status {
@@ -458,6 +459,97 @@ func TestNewFrameworkErrors(t *testing.T) {
 	}
 }
 
+func recordingPluginFactory(name string, result map[string]runtime.Object) PluginFactory {
+	return func(args runtime.Object, f FrameworkHandle) (Plugin, error) {
+		result[name] = args
+		return &TestPlugin{
+			name: name,
+		}, nil
+	}
+}
+
+func TestNewFrameworkPluginDefaults(t *testing.T) {
+	// In-tree plugins that use args.
+	pluginsWithArgs := []string{"InterPodAffinity", "NodeLabel", "NodeResourcesFit", "RequestedToCapacityRatio", "PodTopologySpread"}
+	plugins := config.Plugins{
+		Filter: &config.PluginSet{},
+	}
+	// Use all plugins in Filter.
+	for _, name := range pluginsWithArgs {
+		plugins.Filter.Enabled = append(plugins.Filter.Enabled, config.Plugin{Name: name})
+	}
+	// Set required extension points.
+	onePlugin := &config.PluginSet{
+		Enabled: []config.Plugin{{Name: pluginsWithArgs[0]}},
+	}
+	plugins.QueueSort = onePlugin
+	plugins.Bind = onePlugin
+
+	tests := []struct {
+		name      string
+		pluginCfg []config.PluginConfig
+		wantCfg   map[string]runtime.Object
+	}{
+		{
+			name: "empty plugin config",
+			wantCfg: map[string]runtime.Object{
+				"InterPodAffinity": &config.InterPodAffinityArgs{
+					HardPodAffinityWeight: 1,
+				},
+				"NodeLabel":                &config.NodeLabelArgs{},
+				"NodeResourcesFit":         &config.NodeResourcesFitArgs{},
+				"RequestedToCapacityRatio": &config.RequestedToCapacityRatioArgs{},
+				"PodTopologySpread":        &config.PodTopologySpreadArgs{},
+			},
+		},
+		{
+			name: "some overridden plugin config",
+			pluginCfg: []config.PluginConfig{
+				{
+					Name: "InterPodAffinity",
+					Args: &config.InterPodAffinityArgs{
+						HardPodAffinityWeight: 3,
+					},
+				},
+				{
+					Name: "NodeResourcesFit",
+					Args: &config.NodeResourcesFitArgs{
+						IgnoredResources: []string{"example.com/foo"},
+					},
+				},
+			},
+			wantCfg: map[string]runtime.Object{
+				"InterPodAffinity": &config.InterPodAffinityArgs{
+					HardPodAffinityWeight: 3,
+				},
+				"NodeLabel": &config.NodeLabelArgs{},
+				"NodeResourcesFit": &config.NodeResourcesFitArgs{
+					IgnoredResources: []string{"example.com/foo"},
+				},
+				"RequestedToCapacityRatio": &config.RequestedToCapacityRatioArgs{},
+				"PodTopologySpread":        &config.PodTopologySpreadArgs{},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// result will hold plugin args passed to factories.
+			result := make(map[string]runtime.Object)
+			registry := make(Registry, len(pluginsWithArgs))
+			for _, name := range pluginsWithArgs {
+				registry[name] = recordingPluginFactory(name, result)
+			}
+			_, err := NewFramework(registry, &plugins, tt.pluginCfg)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if diff := cmp.Diff(tt.wantCfg, result); diff != "" {
+				t.Errorf("unexpected plugin args (-want,+got):\n%s", diff)
+			}
+		})
+	}
+}
+
 func TestRunScorePlugins(t *testing.T) {
 	tests := []struct {
 		name string