Merge pull request #90309 from alculquicondor/plugin-args-decoding
Use internal config types in scheduling plugin args
commit 8dd93ca94c
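What this PR changes, in one picture: plugin arguments used to travel through the scheduler configuration as raw JSON wrapped in runtime.Unknown and were decoded again by every plugin; with this commit they are decoded, defaulted, and validated once, at configuration load time, into typed structs from pkg/scheduler/apis/config. A minimal before/after sketch (the weight value is illustrative; the types are the real ones touched by this diff):

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/runtime"
    kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
)

func main() {
    // Before: args travel as raw JSON; each plugin re-decodes them itself.
    before := kubeschedulerconfig.PluginConfig{
        Name: "InterPodAffinity",
        Args: &runtime.Unknown{Raw: []byte(`{"hardPodAffinityWeight":5}`)},
    }

    // After: args are a typed internal object, ready for the plugin to use.
    after := kubeschedulerconfig.PluginConfig{
        Name: "InterPodAffinity",
        Args: &kubeschedulerconfig.InterPodAffinityArgs{HardPodAffinityWeight: 5},
    }

    fmt.Println(before.Name, after.Name)
}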
@@ -19,7 +19,6 @@ go_library(
        "//pkg/scheduler/apis/config/scheme:go_default_library",
        "//pkg/scheduler/apis/config/v1alpha2:go_default_library",
        "//pkg/scheduler/apis/config/validation:go_default_library",
        "//pkg/scheduler/framework/plugins:go_default_library",
        "//pkg/scheduler/framework/plugins/interpodaffinity:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -21,10 +21,8 @@ import (

    "github.com/spf13/pflag"
    "k8s.io/apimachinery/pkg/util/validation/field"
    kubeschedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
    kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
)

@@ -129,10 +127,13 @@ func (o *DeprecatedOptions) ApplyTo(cfg *kubeschedulerconfig.KubeSchedulerConfig
        profile.SchedulerName = o.SchedulerName
    }
    if o.HardPodAffinitySymmetricWeight != interpodaffinity.DefaultHardPodAffinityWeight {
        args := kubeschedulerv1alpha2.InterPodAffinityArgs{
            HardPodAffinityWeight: &o.HardPodAffinitySymmetricWeight,
        plCfg := kubeschedulerconfig.PluginConfig{
            Name: interpodaffinity.Name,
            Args: &kubeschedulerconfig.InterPodAffinityArgs{
                HardPodAffinityWeight: o.HardPodAffinitySymmetricWeight,
            },
        }
        profile.PluginConfig = append(profile.PluginConfig, plugins.NewPluginConfig(interpodaffinity.Name, args))
        profile.PluginConfig = append(profile.PluginConfig, plCfg)
    }
    return nil
}

@@ -203,6 +203,9 @@ profiles:
  disabled:
  - name: baz
  pluginConfig:
  - name: InterPodAffinity
    args:
      hardPodAffinityWeight: 2
  - name: foo
    args:
      bar: baz
@@ -543,6 +546,12 @@ profiles:
                },
            },
            PluginConfig: []kubeschedulerconfig.PluginConfig{
                {
                    Name: "InterPodAffinity",
                    Args: &kubeschedulerconfig.InterPodAffinityArgs{
                        HardPodAffinityWeight: 2,
                    },
                },
                {
                    Name: "foo",
                    Args: &runtime.Unknown{
@@ -670,9 +679,7 @@ profiles:
            PluginConfig: []kubeschedulerconfig.PluginConfig{
                {
                    Name: "InterPodAffinity",
                    Args: &runtime.Unknown{
                        Raw: []byte(`{"hardPodAffinityWeight":5}`),
                    },
                    Args: &kubeschedulerconfig.InterPodAffinityArgs{HardPodAffinityWeight: 5},
                },
            },
        },
@@ -45,7 +45,6 @@ go_library(
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
@@ -1,4 +1,4 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
@@ -28,3 +28,19 @@ filegroup(
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

go_test(
    name = "go_default_test",
    srcs = ["scheme_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/apis/config:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
        "//vendor/sigs.k8s.io/yaml:go_default_library",
    ],
)

450 pkg/scheduler/apis/config/scheme/scheme_test.go (new file)
@@ -0,0 +1,450 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheme

import (
    "bytes"
    "testing"

    "github.com/google/go-cmp/cmp"
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/utils/pointer"
    "sigs.k8s.io/yaml"
)

func TestCodecsDecodePluginConfig(t *testing.T) {
    testCases := []struct {
        name         string
        data         []byte
        wantErr      string
        wantProfiles []config.KubeSchedulerProfile
    }{
        {
            name: "v1alpha2 all plugin args in default profile",
            data: []byte(`
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: InterPodAffinity
    args:
      hardPodAffinityWeight: 5
  - name: NodeLabel
    args:
      presentLabels: ["foo"]
  - name: NodeResourcesFit
    args:
      ignoredResources: ["foo"]
  - name: RequestedToCapacityRatio
    args:
      shape:
      - utilization: 1
  - name: PodTopologySpread
    args:
      defaultConstraints:
      - maxSkew: 1
        topologyKey: zone
        whenUnsatisfiable: ScheduleAnyway
  - name: ServiceAffinity
    args:
      affinityLabels: ["bar"]
`),
            wantProfiles: []config.KubeSchedulerProfile{
                {
                    SchedulerName: "default-scheduler",
                    PluginConfig: []config.PluginConfig{
                        {
                            Name: "InterPodAffinity",
                            Args: &config.InterPodAffinityArgs{HardPodAffinityWeight: 5},
                        },
                        {
                            Name: "NodeLabel",
                            Args: &config.NodeLabelArgs{PresentLabels: []string{"foo"}},
                        },
                        {
                            Name: "NodeResourcesFit",
                            Args: &config.NodeResourcesFitArgs{IgnoredResources: []string{"foo"}},
                        },
                        {
                            Name: "RequestedToCapacityRatio",
                            Args: &config.RequestedToCapacityRatioArgs{
                                Shape: []config.UtilizationShapePoint{{Utilization: 1}},
                            },
                        },
                        {
                            Name: "PodTopologySpread",
                            Args: &config.PodTopologySpreadArgs{
                                DefaultConstraints: []v1.TopologySpreadConstraint{
                                    {MaxSkew: 1, TopologyKey: "zone", WhenUnsatisfiable: v1.ScheduleAnyway},
                                },
                            },
                        },
                        {
                            Name: "ServiceAffinity",
                            Args: &config.ServiceAffinityArgs{
                                AffinityLabels: []string{"bar"},
                            },
                        },
                    },
                },
            },
        },
        {
            name: "v1alpha2 plugins can include version and kind",
            data: []byte(`
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: NodeLabel
    args:
      apiVersion: kubescheduler.config.k8s.io/v1alpha2
      kind: NodeLabelArgs
      presentLabels: ["bars"]
`),
            wantProfiles: []config.KubeSchedulerProfile{
                {
                    SchedulerName: "default-scheduler",
                    PluginConfig: []config.PluginConfig{
                        {
                            Name: "NodeLabel",
                            Args: &config.NodeLabelArgs{PresentLabels: []string{"bars"}},
                        },
                    },
                },
            },
        },
        {
            name: "plugin group and kind should match the type",
            data: []byte(`
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: NodeLabel
    args:
      apiVersion: kubescheduler.config.k8s.io/v1alpha2
      kind: InterPodAffinityArgs
`),
            wantErr: "decoding .profiles[0].pluginConfig[0]: args for plugin NodeLabel were not of type NodeLabelArgs.kubescheduler.config.k8s.io, got InterPodAffinityArgs.kubescheduler.config.k8s.io",
        },
        {
            // TODO: do not replicate this case for v1beta1.
            name: "v1alpha2 case insensitive RequestedToCapacityRatioArgs",
            data: []byte(`
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: RequestedToCapacityRatio
    args:
      shape:
      - utilization: 1
        score: 2
      - Utilization: 3
        Score: 4
      resources:
      - name: Upper
        weight: 1
      - Name: lower
        weight: 2
`),
            wantProfiles: []config.KubeSchedulerProfile{
                {
                    SchedulerName: "default-scheduler",
                    PluginConfig: []config.PluginConfig{
                        {
                            Name: "RequestedToCapacityRatio",
                            Args: &config.RequestedToCapacityRatioArgs{
                                Shape: []config.UtilizationShapePoint{
                                    {Utilization: 1, Score: 2},
                                    {Utilization: 3, Score: 4},
                                },
                                Resources: []config.ResourceSpec{
                                    {Name: "Upper", Weight: 1},
                                    {Name: "lower", Weight: 2},
                                },
                            },
                        },
                    },
                },
            },
        },
        {
            name: "out-of-tree plugin args",
            data: []byte(`
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: OutOfTreePlugin
    args:
      foo: bar
`),
            wantProfiles: []config.KubeSchedulerProfile{
                {
                    SchedulerName: "default-scheduler",
                    PluginConfig: []config.PluginConfig{
                        {
                            Name: "OutOfTreePlugin",
                            Args: &runtime.Unknown{
                                ContentType: "application/json",
                                Raw:         []byte(`{"foo":"bar"}`),
                            },
                        },
                    },
                },
            },
        },
        {
            name: "empty and no plugin args",
            data: []byte(`
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: InterPodAffinity
    args:
  - name: NodeResourcesFit
  - name: OutOfTreePlugin
    args:
`),
            wantProfiles: []config.KubeSchedulerProfile{
                {
                    SchedulerName: "default-scheduler",
                    PluginConfig: []config.PluginConfig{
                        {
                            Name: "InterPodAffinity",
                            // TODO(acondor): Set default values.
                            Args: &config.InterPodAffinityArgs{},
                        },
                        {
                            Name: "NodeResourcesFit",
                            Args: &config.NodeResourcesFitArgs{},
                        },
                        {Name: "OutOfTreePlugin"},
                    },
                },
            },
        },
    }
    decoder := Codecs.UniversalDecoder()
    for _, tt := range testCases {
        t.Run(tt.name, func(t *testing.T) {
            obj, gvk, err := decoder.Decode(tt.data, nil, nil)
            if err != nil {
                if tt.wantErr != err.Error() {
                    t.Fatalf("got err %v, want %v", err, tt.wantErr)
                }
                return
            }
            if len(tt.wantErr) != 0 {
                t.Fatalf("no error produced, wanted %v", tt.wantErr)
            }
            got, ok := obj.(*config.KubeSchedulerConfiguration)
            if !ok {
                t.Fatalf("decoded into %s, want %s", gvk, config.SchemeGroupVersion.WithKind("KubeSchedulerConfiguration"))
            }
            if diff := cmp.Diff(tt.wantProfiles, got.Profiles); diff != "" {
                t.Errorf("unexpected configuration (-want,+got):\n%s", diff)
            }
        })
    }
}

func TestCodecsEncodePluginConfig(t *testing.T) {
    testCases := []struct {
        name    string
        obj     runtime.Object
        version schema.GroupVersion
        want    string
    }{
        {
            name:    "v1alpha2 in-tree and out-of-tree plugins",
            version: v1alpha2.SchemeGroupVersion,
            obj: &v1alpha2.KubeSchedulerConfiguration{
                Profiles: []v1alpha2.KubeSchedulerProfile{
                    {
                        PluginConfig: []v1alpha2.PluginConfig{
                            {
                                Name: "InterPodAffinity",
                                Args: runtime.RawExtension{
                                    Object: &v1alpha2.InterPodAffinityArgs{
                                        HardPodAffinityWeight: pointer.Int32Ptr(5),
                                    },
                                },
                            },
                            {
                                Name: "RequestedToCapacityRatio",
                                Args: runtime.RawExtension{
                                    Object: &v1alpha2.RequestedToCapacityRatioArgs{
                                        Shape: []v1alpha2.UtilizationShapePoint{
                                            {Utilization: 1, Score: 2},
                                        },
                                        Resources: []v1alpha2.ResourceSpec{
                                            {Name: "lower", Weight: 2},
                                        },
                                    },
                                },
                            },
                            {
                                Name: "OutOfTreePlugin",
                                Args: runtime.RawExtension{
                                    Raw: []byte(`{"foo":"bar"}`),
                                },
                            },
                        },
                    },
                },
            },
            want: `apiVersion: kubescheduler.config.k8s.io/v1alpha2
clientConnection:
  acceptContentTypes: ""
  burst: 0
  contentType: ""
  kubeconfig: ""
  qps: 0
kind: KubeSchedulerConfiguration
leaderElection:
  leaderElect: null
  leaseDuration: 0s
  renewDeadline: 0s
  resourceLock: ""
  resourceName: ""
  resourceNamespace: ""
  retryPeriod: 0s
profiles:
- pluginConfig:
  - args:
      apiVersion: kubescheduler.config.k8s.io/v1alpha2
      hardPodAffinityWeight: 5
      kind: InterPodAffinityArgs
    name: InterPodAffinity
  - args:
      apiVersion: kubescheduler.config.k8s.io/v1alpha2
      kind: RequestedToCapacityRatioArgs
      resources:
      - Name: lower
        Weight: 2
      shape:
      - Score: 2
        Utilization: 1
    name: RequestedToCapacityRatio
  - args:
      foo: bar
    name: OutOfTreePlugin
`,
        },
        {
            name:    "v1alpha2 in-tree and out-of-tree plugins from internal",
            version: v1alpha2.SchemeGroupVersion,
            obj: &config.KubeSchedulerConfiguration{
                Profiles: []config.KubeSchedulerProfile{
                    {
                        PluginConfig: []config.PluginConfig{
                            {
                                Name: "InterPodAffinity",
                                Args: &config.InterPodAffinityArgs{
                                    HardPodAffinityWeight: 5,
                                },
                            },
                            {
                                Name: "OutOfTreePlugin",
                                Args: &runtime.Unknown{
                                    Raw: []byte(`{"foo":"bar"}`),
                                },
                            },
                        },
                    },
                },
            },
            want: `apiVersion: kubescheduler.config.k8s.io/v1alpha2
bindTimeoutSeconds: 0
clientConnection:
  acceptContentTypes: ""
  burst: 0
  contentType: ""
  kubeconfig: ""
  qps: 0
disablePreemption: false
enableContentionProfiling: false
enableProfiling: false
healthzBindAddress: ""
kind: KubeSchedulerConfiguration
leaderElection:
  leaderElect: false
  leaseDuration: 0s
  renewDeadline: 0s
  resourceLock: ""
  resourceName: ""
  resourceNamespace: ""
  retryPeriod: 0s
metricsBindAddress: ""
percentageOfNodesToScore: 0
podInitialBackoffSeconds: 0
podMaxBackoffSeconds: 0
profiles:
- pluginConfig:
  - args:
      apiVersion: kubescheduler.config.k8s.io/v1alpha2
      hardPodAffinityWeight: 5
      kind: InterPodAffinityArgs
    name: InterPodAffinity
  - args:
      foo: bar
    name: OutOfTreePlugin
  schedulerName: ""
`,
        },
    }
    yamlInfo, ok := runtime.SerializerInfoForMediaType(Codecs.SupportedMediaTypes(), runtime.ContentTypeYAML)
    if !ok {
        t.Fatalf("unable to locate encoder -- %q is not a supported media type", runtime.ContentTypeYAML)
    }
    jsonInfo, ok := runtime.SerializerInfoForMediaType(Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
    if !ok {
        t.Fatalf("unable to locate encoder -- %q is not a supported media type", runtime.ContentTypeJSON)
    }
    for _, tt := range testCases {
        t.Run(tt.name, func(t *testing.T) {
            encoder := Codecs.EncoderForVersion(yamlInfo.Serializer, tt.version)
            var buf bytes.Buffer
            if err := encoder.Encode(tt.obj, &buf); err != nil {
                t.Fatal(err)
            }
            if diff := cmp.Diff(tt.want, buf.String()); diff != "" {
                t.Errorf("unexpected encoded configuration:\n%s", diff)
            }
            encoder = Codecs.EncoderForVersion(jsonInfo.Serializer, tt.version)
            buf = bytes.Buffer{}
            if err := encoder.Encode(tt.obj, &buf); err != nil {
                t.Fatal(err)
            }
            out, err := yaml.JSONToYAML(buf.Bytes())
            if err != nil {
                t.Fatal(err)
            }
            if diff := cmp.Diff(tt.want, string(out)); diff != "" {
                t.Errorf("unexpected encoded configuration:\n%s", diff)
            }
        })
    }
}
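A usage note grounded in the tests above: callers outside this package reach the same codecs through the scheme package, and get typed internal args back from a versioned payload. A minimal sketch, assuming a valid v1alpha2 configuration:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
)

func main() {
    data := []byte(`
apiVersion: kubescheduler.config.k8s.io/v1alpha2
kind: KubeSchedulerConfiguration
profiles:
- pluginConfig:
  - name: InterPodAffinity
    args:
      hardPodAffinityWeight: 5
`)
    obj, _, err := scheme.Codecs.UniversalDecoder().Decode(data, nil, nil)
    if err != nil {
        panic(err)
    }
    cfg := obj.(*config.KubeSchedulerConfiguration)
    // Args come back as the typed internal struct, not runtime.Unknown.
    args := cfg.Profiles[0].PluginConfig[0].Args.(*config.InterPodAffinityArgs)
    fmt.Println(args.HardPodAffinityWeight) // 5
}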
@@ -25,7 +25,6 @@ import (

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
@@ -1598,90 +1597,58 @@ func TestPluginsConfigurationCompatibility(t *testing.T) {
            pluginConfig: []config.PluginConfig{
                {
                    Name: "NodeResourcesFit",
                    Args: &runtime.Unknown{
                        Raw: []byte(`{
                            "ignoredResources": [
                                "foo",
                                "bar"
                            ]
                        }`),
                    Args: &config.NodeResourcesFitArgs{
                        IgnoredResources: []string{"foo", "bar"},
                    },
                },
                {
                    Name: "PodTopologySpread",
                    Args: &runtime.Unknown{
                        Raw: []byte(`{
                            "defaultConstraints": [
                                {
                                    "maxSkew": 1,
                                    "topologyKey": "foo",
                                    "whenUnsatisfiable": "DoNotSchedule"
                                },
                                {
                                    "maxSkew": 10,
                                    "topologyKey": "bar",
                                    "whenUnsatisfiable": "ScheduleAnyway"
                                }
                            ]
                        }`),
                    Args: &config.PodTopologySpreadArgs{
                        DefaultConstraints: []v1.TopologySpreadConstraint{
                            {
                                MaxSkew:           1,
                                TopologyKey:       "foo",
                                WhenUnsatisfiable: v1.DoNotSchedule,
                            },
                            {
                                MaxSkew:           10,
                                TopologyKey:       "bar",
                                WhenUnsatisfiable: v1.ScheduleAnyway,
                            },
                        },
                    },
                },
                {
                    Name: "RequestedToCapacityRatio",
                    Args: &runtime.Unknown{
                        Raw: []byte(`{
                            "shape":[
                                "Utilization": 5,
                                "Score": 5
                            ],
                            "resources":[
                                "Name": "cpu",
                                "Weight": 10
                            ]
                        }`),
                    Args: &config.RequestedToCapacityRatioArgs{
                        Shape: []config.UtilizationShapePoint{
                            {Utilization: 5, Score: 5},
                        },
                        Resources: []config.ResourceSpec{
                            {Name: "cpu", Weight: 10},
                        },
                    },
                },
                {
                    Name: "InterPodAffinity",
                    Args: &runtime.Unknown{
                        Raw: []byte(`{
                            "HardPodAffinityWeight": 100
                        }`),
                    Args: &config.InterPodAffinityArgs{
                        HardPodAffinityWeight: 100,
                    },
                },
                {
                    Name: "NodeLabel",
                    Args: &runtime.Unknown{
                        Raw: []byte(`{
                            "presentLabels": [
                                "foo",
                                "bar"
                            ],
                            "absentLabels": [
                                "apple"
                            ],
                            "presentLabelsPreference": [
                                "dog"
                            ],
                            "absentLabelsPreference": [
                                "cat"
                            ]
                        }`),
                    Args: &config.NodeLabelArgs{
                        PresentLabels:           []string{"foo", "bar"},
                        AbsentLabels:            []string{"apple"},
                        PresentLabelsPreference: []string{"dog"},
                        AbsentLabelsPreference:  []string{"cat"},
                    },
                },
                {
                    Name: "ServiceAffinity",
                    Args: &runtime.Unknown{
                        Raw: []byte(`{
                            affinityLabels: [
                                "foo",
                                "bar"
                            ],
                            antiAffinityLabelsPreference: [
                                "disk",
                                "flash"
                            ]
                        }`),
                    Args: &config.ServiceAffinityArgs{
                        AffinityLabels:               []string{"foo", "bar"},
                        AntiAffinityLabelsPreference: []string{"disk", "flash"},
                    },
                },
            },
@@ -20,6 +20,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/conversion:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/component-base/config/v1alpha1:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
@@ -17,20 +17,93 @@ limitations under the License.
package v1alpha2

import (
    conversion "k8s.io/apimachinery/pkg/conversion"
    v1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
    "fmt"
    "sync"

    "k8s.io/apimachinery/pkg/conversion"
    "k8s.io/apimachinery/pkg/runtime"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/utils/pointer"
)

var (
    // pluginArgConversionScheme is a scheme with internal and v1alpha2 registered,
    // used for defaulting/converting typed PluginConfig Args.
    // Access via getPluginArgConversionScheme()
    pluginArgConversionScheme     *runtime.Scheme
    initPluginArgConversionScheme sync.Once
)

func getPluginArgConversionScheme() *runtime.Scheme {
    initPluginArgConversionScheme.Do(func() {
        // set up the scheme used for plugin arg conversion
        pluginArgConversionScheme = runtime.NewScheme()
        utilruntime.Must(AddToScheme(pluginArgConversionScheme))
        utilruntime.Must(config.AddToScheme(pluginArgConversionScheme))
    })
    return pluginArgConversionScheme
}

func Convert_v1alpha2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1alpha2.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error {
    if err := autoConvert_v1alpha2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s); err != nil {
        return err
    }
    out.AlgorithmSource.Provider = pointer.StringPtr(v1alpha2.SchedulerDefaultProviderName)
    return convertToInternalPluginConfigArgs(out)
}

// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal
// types using a scheme, after applying defaults.
func convertToInternalPluginConfigArgs(out *config.KubeSchedulerConfiguration) error {
    scheme := getPluginArgConversionScheme()
    for i := range out.Profiles {
        for j := range out.Profiles[i].PluginConfig {
            args := out.Profiles[i].PluginConfig[j].Args
            if args == nil {
                continue
            }
            if _, isUnknown := args.(*runtime.Unknown); isUnknown {
                continue
            }
            scheme.Default(args)
            internalArgs, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion)
            if err != nil {
                return fmt.Errorf("converting .Profiles[%d].PluginConfig[%d].Args into internal type: %w", i, j, err)
            }
            out.Profiles[i].PluginConfig[j].Args = internalArgs
        }
    }
    return nil
}

func Convert_config_KubeSchedulerConfiguration_To_v1alpha2_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1alpha2.KubeSchedulerConfiguration, s conversion.Scope) error {
    return autoConvert_config_KubeSchedulerConfiguration_To_v1alpha2_KubeSchedulerConfiguration(in, out, s)
    if err := autoConvert_config_KubeSchedulerConfiguration_To_v1alpha2_KubeSchedulerConfiguration(in, out, s); err != nil {
        return err
    }
    return convertToExternalPluginConfigArgs(out)
}

// convertToExternalPluginConfigArgs converts PluginConfig#Args into
// external (versioned) types using a scheme.
func convertToExternalPluginConfigArgs(out *v1alpha2.KubeSchedulerConfiguration) error {
    scheme := getPluginArgConversionScheme()
    for i := range out.Profiles {
        for j := range out.Profiles[i].PluginConfig {
            args := out.Profiles[i].PluginConfig[j].Args
            if args.Object == nil {
                continue
            }
            if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown {
                continue
            }
            externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion)
            if err != nil {
                return err
            }
            out.Profiles[i].PluginConfig[j].Args.Object = externalArgs
        }
    }
    return nil
}

@@ -40,7 +40,6 @@ import (
    policylisters "k8s.io/client-go/listers/policy/v1beta1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/klog"
    schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/controller/volume/scheduling"
    kubefeatures "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
@@ -162,12 +161,13 @@ func (c *Configurator) create() (*Scheduler, error) {
    if len(ignoredExtendedResources) > 0 {
        for i := range c.profiles {
            prof := &c.profiles[i]
            prof.PluginConfig = append(prof.PluginConfig,
                frameworkplugins.NewPluginConfig(
                    noderesources.FitName,
                    schedulerv1alpha2.NodeResourcesFitArgs{IgnoredResources: ignoredExtendedResources},
                ),
            )
            pc := schedulerapi.PluginConfig{
                Name: noderesources.FitName,
                Args: &schedulerapi.NodeResourcesFitArgs{
                    IgnoredResources: ignoredExtendedResources,
                },
            }
            prof.PluginConfig = append(prof.PluginConfig, pc)
        }
    }

@@ -280,9 +280,8 @@ func (c *Configurator) createFromConfig(policy schedulerapi.Policy) (*Scheduler,
    // HardPodAffinitySymmetricWeight in the policy config takes precedence over
    // CLI configuration.
    if policy.HardPodAffinitySymmetricWeight != 0 {
        v := policy.HardPodAffinitySymmetricWeight
        args.InterPodAffinityArgs = &schedulerv1alpha2.InterPodAffinityArgs{
            HardPodAffinityWeight: &v,
        args.InterPodAffinityArgs = &schedulerapi.InterPodAffinityArgs{
            HardPodAffinityWeight: policy.HardPodAffinitySymmetricWeight,
        }
    }

@@ -18,7 +18,6 @@ package scheduler

import (
    "context"
    "encoding/json"
    "errors"
    "reflect"
    "strings"
@@ -161,10 +160,18 @@ func TestCreateFromConfig(t *testing.T) {
            }

            // Verify that node label predicate/priority are converted to framework plugins.
            wantArgs := `{"Name":"NodeLabel","Args":{"presentLabels":["zone"],"absentLabels":["foo"],"presentLabelsPreference":["l1"],"absentLabelsPreference":["l2"]}}`
            var wantArgs runtime.Object = &schedulerapi.NodeLabelArgs{
                PresentLabels:           []string{"zone"},
                AbsentLabels:            []string{"foo"},
                PresentLabelsPreference: []string{"l1"},
                AbsentLabelsPreference:  []string{"l2"},
            }
            verifyPluginConvertion(t, nodelabel.Name, []string{"FilterPlugin", "ScorePlugin"}, prof, &factory.profiles[0], 6, wantArgs)
            // Verify that service affinity custom predicate/priority is converted to framework plugin.
            wantArgs = `{"Name":"ServiceAffinity","Args":{"affinityLabels":["zone","foo"],"antiAffinityLabelsPreference":["rack","zone"]}}`
            wantArgs = &schedulerapi.ServiceAffinityArgs{
                AffinityLabels:               []string{"zone", "foo"},
                AntiAffinityLabelsPreference: []string{"rack", "zone"},
            }
            verifyPluginConvertion(t, serviceaffinity.Name, []string{"FilterPlugin", "ScorePlugin"}, prof, &factory.profiles[0], 6, wantArgs)
            // TODO(#87703): Verify all plugin configs.
        })
@@ -172,8 +179,8 @@ func TestCreateFromConfig(t *testing.T) {

}

func verifyPluginConvertion(t *testing.T, name string, extentionPoints []string, prof *profile.Profile, cfg *schedulerapi.KubeSchedulerProfile, wantWeight int32, wantArgs string) {
    for _, extensionPoint := range extentionPoints {
func verifyPluginConvertion(t *testing.T, name string, extensionPoints []string, prof *profile.Profile, cfg *schedulerapi.KubeSchedulerProfile, wantWeight int32, wantArgs runtime.Object) {
    for _, extensionPoint := range extensionPoints {
        plugin, ok := findPlugin(name, extensionPoint, prof)
        if !ok {
            t.Fatalf("%q plugin does not exist in framework.", name)
@@ -185,12 +192,8 @@ func verifyPluginConvertion(t *testing.T, name string, extentionPoints []string,
        }
        // Verify that the policy config is converted to plugin config.
        pluginConfig := findPluginConfig(name, cfg)
        encoding, err := json.Marshal(pluginConfig)
        if err != nil {
            t.Errorf("Failed to marshal %+v: %v", pluginConfig, err)
        }
        if string(encoding) != wantArgs {
            t.Errorf("Config for %v plugin mismatch. got: %v, want: %v", name, string(encoding), wantArgs)
        if diff := cmp.Diff(wantArgs, pluginConfig.Args); diff != "" {
            t.Errorf("Config for %v plugin mismatch (-want,+got):\n%s", name, diff)
        }
    }
}
@@ -250,7 +253,7 @@ func TestCreateFromConfigWithHardPodAffinitySymmetricWeight(t *testing.T) {
    for _, cfg := range factory.profiles[0].PluginConfig {
        if cfg.Name == interpodaffinity.Name {
            foundAffinityCfg = true
            wantArgs := &runtime.Unknown{Raw: []byte(`{"hardPodAffinityWeight":10}`)}
            wantArgs := &schedulerapi.InterPodAffinityArgs{HardPodAffinityWeight: 10}

            if diff := cmp.Diff(wantArgs, cfg.Args); diff != "" {
                t.Errorf("wrong InterPodAffinity args (-want, +got): %s", diff)
@@ -31,10 +31,8 @@ go_library(
        "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
        "//pkg/scheduler/framework/plugins/volumezone:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)

@@ -10,6 +10,7 @@ go_library(
    importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/internal/parallelize:go_default_library",
        "//pkg/scheduler/util:go_default_library",
@@ -19,9 +20,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)

@@ -33,13 +32,11 @@ go_test(
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/internal/cache:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
        "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)

@@ -22,9 +22,8 @@ import (

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/validation/field"
    schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    "k8s.io/utils/pointer"
)

const (
@@ -46,7 +45,7 @@ var _ framework.ScorePlugin = &InterPodAffinity{}

// InterPodAffinity is a plugin that checks inter pod affinity
type InterPodAffinity struct {
    args         schedulerv1alpha2.InterPodAffinityArgs
    args         config.InterPodAffinityArgs
    sharedLister framework.SharedLister
    sync.Mutex
}
@@ -66,26 +65,30 @@ func New(plArgs runtime.Object, h framework.FrameworkHandle) (framework.Plugin,
    if h.SnapshotSharedLister() == nil {
        return nil, fmt.Errorf("SnapshotSharedlister is nil")
    }
    pl := &InterPodAffinity{
    args, err := getArgs(plArgs)
    if err != nil {
        return nil, err
    }
    if err := ValidateHardPodAffinityWeight(field.NewPath("hardPodAffinityWeight"), args.HardPodAffinityWeight); err != nil {
        return nil, err
    }
    return &InterPodAffinity{
        args:         args,
        sharedLister: h.SnapshotSharedLister(),
    }
    if err := framework.DecodeInto(plArgs, &pl.args); err != nil {
        return nil, err
    }
    if err := validateArgs(&pl.args); err != nil {
        return nil, err
    }
    if pl.args.HardPodAffinityWeight == nil {
        pl.args.HardPodAffinityWeight = pointer.Int32Ptr(DefaultHardPodAffinityWeight)
    }
    return pl, nil
    }, nil
}

func validateArgs(args *schedulerv1alpha2.InterPodAffinityArgs) error {
    if args.HardPodAffinityWeight == nil {
        return nil
func getArgs(obj runtime.Object) (config.InterPodAffinityArgs, error) {
    if obj == nil {
        return config.InterPodAffinityArgs{
            HardPodAffinityWeight: DefaultHardPodAffinityWeight,
        }, nil
    }
    return ValidateHardPodAffinityWeight(field.NewPath("hardPodAffinityWeight"), *args.HardPodAffinityWeight)
    ptr, ok := obj.(*config.InterPodAffinityArgs)
    if !ok {
        return config.InterPodAffinityArgs{}, fmt.Errorf("want args to be of type InterPodAffinityArgs, got %T", obj)
    }
    return *ptr, nil
}

// ValidateHardPodAffinityWeight validates that weight is within allowed range.
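The getArgs helper above sets the pattern this PR applies to every plugin: nil args mean defaults, anything else must already be the expected typed object. A sketch of the same pattern for a hypothetical out-of-tree plugin; FooArgs, its default weight, and the hand-written DeepCopyObject are invented for illustration (real args types would normally get their runtime.Object methods from deepcopy-gen):

package fooplugin

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
)

// FooArgs is a hypothetical typed args struct, not part of this diff.
// TypeMeta supplies GetObjectKind; DeepCopyObject completes runtime.Object.
type FooArgs struct {
    metav1.TypeMeta
    Weight int32
}

func (a *FooArgs) DeepCopyObject() runtime.Object {
    c := *a
    return &c
}

// getArgs mirrors the in-tree helpers: defaults on nil, type-assert otherwise.
func getArgs(obj runtime.Object) (FooArgs, error) {
    if obj == nil {
        return FooArgs{Weight: 1}, nil // assumed default weight
    }
    ptr, ok := obj.(*FooArgs)
    if !ok {
        return FooArgs{}, fmt.Errorf("want args to be of type FooArgs, got %T", obj)
    }
    return *ptr, nil
}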
@@ -137,7 +137,7 @@ func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod
    // For every hard pod affinity term of <existingPod>, if <pod> matches the term,
    // increment <p.counts> for every node in the cluster with the same <term.TopologyKey>
    // value as that of <existingPod>'s node by the constant <ipa.hardPodAffinityWeight>
    if *pl.args.HardPodAffinityWeight > 0 {
    if pl.args.HardPodAffinityWeight > 0 {
        terms := existingPodAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution
        // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
        //if len(existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
@@ -145,7 +145,7 @@ func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod
        //}
        for i := range terms {
            term := &terms[i]
            processedTerm, err := newWeightedAffinityTerm(existingPod, term, *pl.args.HardPodAffinityWeight)
            processedTerm, err := newWeightedAffinityTerm(existingPod, term, pl.args.HardPodAffinityWeight)
            if err != nil {
                return err
            }
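A note on why the dereferences disappear above: the versioned v1alpha2 args declare HardPodAffinityWeight as *int32 so that unset can be told apart from zero and defaulted, while the internal type holds a plain int32 because defaulting has already run during conversion. A self-contained sketch of that distinction, using local illustrative types rather than the real ones:

package main

import "fmt"

// versionedArgs stands in for the v1alpha2 shape: pointer means "maybe unset".
type versionedArgs struct {
    HardPodAffinityWeight *int32
}

// internalArgs stands in for the internal shape: defaulting already applied.
type internalArgs struct {
    HardPodAffinityWeight int32
}

// toInternal applies the default when the versioned field is unset.
func toInternal(v versionedArgs, defaultWeight int32) internalArgs {
    if v.HardPodAffinityWeight == nil {
        return internalArgs{HardPodAffinityWeight: defaultWeight}
    }
    return internalArgs{HardPodAffinityWeight: *v.HardPodAffinityWeight}
}

func main() {
    fmt.Println(toInternal(versionedArgs{}, 1)) // {1}: default applied
}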
@@ -18,17 +18,14 @@ package interpodaffinity

import (
    "context"
    "fmt"
    "reflect"
    "testing"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
    "k8s.io/utils/pointer"
)

func TestPreferredAffinity(t *testing.T) {
@@ -521,8 +518,8 @@ func TestPreferredAffinity(t *testing.T) {
            state := framework.NewCycleState()
            snapshot := cache.NewSnapshot(test.pods, test.nodes)
            p := &InterPodAffinity{
                args: schedulerv1alpha2.InterPodAffinityArgs{
                    HardPodAffinityWeight: pointer.Int32Ptr(DefaultHardPodAffinityWeight),
                args: config.InterPodAffinityArgs{
                    HardPodAffinityWeight: DefaultHardPodAffinityWeight,
                },
                sharedLister: snapshot,
            }
@@ -630,7 +627,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
            snapshot := cache.NewSnapshot(test.pods, test.nodes)
            fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))

            args := &runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, test.hardPodAffinityWeight))}
            args := &config.InterPodAffinityArgs{HardPodAffinityWeight: test.hardPodAffinityWeight}
            p, err := New(args, fh)
            if err != nil {
                t.Fatal(err)
@@ -17,13 +17,9 @@ limitations under the License.
package plugins

import (
    "encoding/json"

    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/klog"
    schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/features"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
@@ -165,15 +161,15 @@ type ConfigProducerArgs struct {
    // Weight used for priority functions.
    Weight int32
    // NodeLabelArgs is the args for the NodeLabel plugin.
    NodeLabelArgs *schedulerv1alpha2.NodeLabelArgs
    NodeLabelArgs *config.NodeLabelArgs
    // RequestedToCapacityRatioArgs is the args for the RequestedToCapacityRatio plugin.
    RequestedToCapacityRatioArgs *schedulerv1alpha2.RequestedToCapacityRatioArgs
    RequestedToCapacityRatioArgs *config.RequestedToCapacityRatioArgs
    // ServiceAffinityArgs is the args for the ServiceAffinity plugin.
    ServiceAffinityArgs *schedulerv1alpha2.ServiceAffinityArgs
    ServiceAffinityArgs *config.ServiceAffinityArgs
    // NodeResourcesFitArgs is the args for the NodeResources fit filter.
    NodeResourcesFitArgs *schedulerv1alpha2.NodeResourcesFitArgs
    NodeResourcesFitArgs *config.NodeResourcesFitArgs
    // InterPodAffinityArgs is the args for InterPodAffinity plugin
    InterPodAffinityArgs *schedulerv1alpha2.InterPodAffinityArgs
    InterPodAffinityArgs *config.InterPodAffinityArgs
}

// ConfigProducer returns the set of plugins and their configuration for a
@@ -227,7 +223,8 @@ func NewLegacyRegistry() *LegacyRegistry {
            plugins.Filter = appendToPluginSet(plugins.Filter, noderesources.FitName, nil)
            plugins.PreFilter = appendToPluginSet(plugins.PreFilter, noderesources.FitName, nil)
            if args.NodeResourcesFitArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(noderesources.FitName, args.NodeResourcesFitArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: noderesources.FitName, Args: args.NodeResourcesFitArgs})
            }
            plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil)
            plugins.Filter = appendToPluginSet(plugins.Filter, nodeports.Name, nil)
@@ -245,7 +242,8 @@ func NewLegacyRegistry() *LegacyRegistry {
            plugins.Filter = appendToPluginSet(plugins.Filter, noderesources.FitName, nil)
            plugins.PreFilter = appendToPluginSet(plugins.PreFilter, noderesources.FitName, nil)
            if args.NodeResourcesFitArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(noderesources.FitName, args.NodeResourcesFitArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: noderesources.FitName, Args: args.NodeResourcesFitArgs})
            }
            return
        })
@@ -320,7 +318,8 @@ func NewLegacyRegistry() *LegacyRegistry {
        func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
            plugins.Filter = appendToPluginSet(plugins.Filter, nodelabel.Name, nil)
            if args.NodeLabelArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(nodelabel.Name, args.NodeLabelArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: nodelabel.Name, Args: args.NodeLabelArgs})
            }
            return
        })
@@ -328,7 +327,8 @@ func NewLegacyRegistry() *LegacyRegistry {
        func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
            plugins.Filter = appendToPluginSet(plugins.Filter, serviceaffinity.Name, nil)
            if args.ServiceAffinityArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(serviceaffinity.Name, args.ServiceAffinityArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: serviceaffinity.Name, Args: args.ServiceAffinityArgs})
            }
            plugins.PreFilter = appendToPluginSet(plugins.PreFilter, serviceaffinity.Name, nil)
            return
@@ -362,7 +362,8 @@ func NewLegacyRegistry() *LegacyRegistry {
            plugins.PreScore = appendToPluginSet(plugins.PreScore, interpodaffinity.Name, nil)
            plugins.Score = appendToPluginSet(plugins.Score, interpodaffinity.Name, &args.Weight)
            if args.InterPodAffinityArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(interpodaffinity.Name, args.InterPodAffinityArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: interpodaffinity.Name, Args: args.InterPodAffinityArgs})
            }
            return
        })
@@ -390,7 +391,8 @@ func NewLegacyRegistry() *LegacyRegistry {
        func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
            plugins.Score = appendToPluginSet(plugins.Score, noderesources.RequestedToCapacityRatioName, &args.Weight)
            if args.RequestedToCapacityRatioArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(noderesources.RequestedToCapacityRatioName, args.RequestedToCapacityRatioArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: noderesources.RequestedToCapacityRatioName, Args: args.RequestedToCapacityRatioArgs})
            }
            return
        })
@@ -403,7 +405,8 @@ func NewLegacyRegistry() *LegacyRegistry {
            weight := args.Weight * int32(len(args.NodeLabelArgs.PresentLabelsPreference)+len(args.NodeLabelArgs.AbsentLabelsPreference))
            plugins.Score = appendToPluginSet(plugins.Score, nodelabel.Name, &weight)
            if args.NodeLabelArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(nodelabel.Name, args.NodeLabelArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: nodelabel.Name, Args: args.NodeLabelArgs})
            }
            return
        })
@@ -415,7 +418,8 @@ func NewLegacyRegistry() *LegacyRegistry {
            weight := args.Weight * int32(len(args.ServiceAffinityArgs.AntiAffinityLabelsPreference))
            plugins.Score = appendToPluginSet(plugins.Score, serviceaffinity.Name, &weight)
            if args.ServiceAffinityArgs != nil {
                pluginConfig = append(pluginConfig, NewPluginConfig(serviceaffinity.Name, args.ServiceAffinityArgs))
                pluginConfig = append(pluginConfig,
                    config.PluginConfig{Name: serviceaffinity.Name, Args: args.ServiceAffinityArgs})
            }
            return
        })
@@ -487,19 +491,6 @@ func appendToPluginSet(set *config.PluginSet, name string, weight *int32) *confi
    return set
}

// NewPluginConfig builds a PluginConfig with the struct of args marshaled.
// It panics if it fails to marshal.
func NewPluginConfig(pluginName string, args interface{}) config.PluginConfig {
    encoding, err := json.Marshal(args)
    if err != nil {
        klog.Fatalf("failed to marshal %+v: %v", args, err)
    }
    return config.PluginConfig{
        Name: pluginName,
        Args: &runtime.Unknown{Raw: encoding},
    }
}

// ProcessPredicatePolicy given a PredicatePolicy, return the plugin name implementing the predicate and update
// the ConfigProducerArgs if necessary.
func (lr *LegacyRegistry) ProcessPredicatePolicy(policy config.PredicatePolicy, pluginArgs *ConfigProducerArgs) string {
@@ -526,7 +517,7 @@ func (lr *LegacyRegistry) ProcessPredicatePolicy(policy config.PredicatePolicy,
    if policy.Argument.ServiceAffinity != nil {
        // map LabelsPresence policy to ConfigProducerArgs that's used to configure the ServiceAffinity plugin.
        if pluginArgs.ServiceAffinityArgs == nil {
            pluginArgs.ServiceAffinityArgs = &schedulerv1alpha2.ServiceAffinityArgs{}
            pluginArgs.ServiceAffinityArgs = &config.ServiceAffinityArgs{}
        }
        pluginArgs.ServiceAffinityArgs.AffinityLabels = append(pluginArgs.ServiceAffinityArgs.AffinityLabels, policy.Argument.ServiceAffinity.Labels...)

@@ -539,7 +530,7 @@ func (lr *LegacyRegistry) ProcessPredicatePolicy(policy config.PredicatePolicy,
    if policy.Argument.LabelsPresence != nil {
        // Map LabelPresence policy to ConfigProducerArgs that's used to configure the NodeLabel plugin.
        if pluginArgs.NodeLabelArgs == nil {
            pluginArgs.NodeLabelArgs = &schedulerv1alpha2.NodeLabelArgs{}
            pluginArgs.NodeLabelArgs = &config.NodeLabelArgs{}
        }
        if policy.Argument.LabelsPresence.Presence {
            pluginArgs.NodeLabelArgs.PresentLabels = append(pluginArgs.NodeLabelArgs.PresentLabels, policy.Argument.LabelsPresence.Labels...)
@@ -587,7 +578,7 @@ func (lr *LegacyRegistry) ProcessPriorityPolicy(policy config.PriorityPolicy, co
        // This name is then used to find the registered plugin and run the plugin instead of the priority.
        priorityName = serviceaffinity.Name
        if configProducerArgs.ServiceAffinityArgs == nil {
            configProducerArgs.ServiceAffinityArgs = &schedulerv1alpha2.ServiceAffinityArgs{}
            configProducerArgs.ServiceAffinityArgs = &config.ServiceAffinityArgs{}
        }
        configProducerArgs.ServiceAffinityArgs.AntiAffinityLabelsPreference = append(
            configProducerArgs.ServiceAffinityArgs.AntiAffinityLabelsPreference,
@@ -601,7 +592,7 @@ func (lr *LegacyRegistry) ProcessPriorityPolicy(policy config.PriorityPolicy, co
        // This name is then used to find the registered plugin and run the plugin instead of the priority.
        priorityName = nodelabel.Name
        if configProducerArgs.NodeLabelArgs == nil {
            configProducerArgs.NodeLabelArgs = &schedulerv1alpha2.NodeLabelArgs{}
            configProducerArgs.NodeLabelArgs = &config.NodeLabelArgs{}
        }
        if policy.Argument.LabelPreference.Presence {
            configProducerArgs.NodeLabelArgs.PresentLabelsPreference = append(
@@ -618,19 +609,19 @@ func (lr *LegacyRegistry) ProcessPriorityPolicy(policy config.PriorityPolicy, co

    if policy.Argument.RequestedToCapacityRatioArguments != nil {
        policyArgs := policy.Argument.RequestedToCapacityRatioArguments
        args := &schedulerv1alpha2.RequestedToCapacityRatioArgs{}
        args := &config.RequestedToCapacityRatioArgs{}

        args.Shape = make([]schedulerv1alpha2.UtilizationShapePoint, len(policyArgs.Shape))
        args.Shape = make([]config.UtilizationShapePoint, len(policyArgs.Shape))
        for i, s := range policyArgs.Shape {
            args.Shape[i] = schedulerv1alpha2.UtilizationShapePoint{
            args.Shape[i] = config.UtilizationShapePoint{
                Utilization: s.Utilization,
                Score:       s.Score,
            }
        }

        args.Resources = make([]schedulerv1alpha2.ResourceSpec, len(policyArgs.Resources))
        args.Resources = make([]config.ResourceSpec, len(policyArgs.Resources))
        for i, r := range policyArgs.Resources {
            args.Resources[i] = schedulerv1alpha2.ResourceSpec{
            args.Resources[i] = config.ResourceSpec{
                Name:   r.Name,
                Weight: r.Weight,
            }
@@ -6,11 +6,11 @@ go_library(
    importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
    ],
)

@@ -19,11 +19,11 @@ go_test(
    srcs = ["node_label_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/apis/config:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/internal/cache:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
    ],
)

@@ -23,7 +23,7 @@ import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

@@ -51,8 +51,8 @@ func validateNoConflict(presentLabels []string, absentLabels []string) error {

// New initializes a new plugin and returns it.
func New(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plugin, error) {
    args := schedulerv1alpha2.NodeLabelArgs{}
    if err := framework.DecodeInto(plArgs, &args); err != nil {
    args, err := getArgs(plArgs)
    if err != nil {
        return nil, err
    }
    if err := validateNoConflict(args.PresentLabels, args.AbsentLabels); err != nil {
@@ -67,10 +67,21 @@ func New(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plu
    }, nil
}

func getArgs(obj runtime.Object) (config.NodeLabelArgs, error) {
    if obj == nil {
        return config.NodeLabelArgs{}, nil
    }
    ptr, ok := obj.(*config.NodeLabelArgs)
    if !ok {
        return config.NodeLabelArgs{}, fmt.Errorf("want args to be of type NodeLabelArgs, got %T", obj)
    }
    return *ptr, nil
}

// NodeLabel checks whether a pod can fit based on the node labels which match a filter that it requests.
type NodeLabel struct {
    handle framework.FrameworkHandle
    args   schedulerv1alpha2.NodeLabelArgs
    args   config.NodeLabelArgs
}

var _ framework.FilterPlugin = &NodeLabel{}
@@ -22,7 +22,7 @@ import (

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)
@@ -30,35 +30,54 @@ import (
func TestValidateNodeLabelArgs(t *testing.T) {
    tests := []struct {
        name string
        args string
        args config.NodeLabelArgs
        err  bool
    }{
        {
            name: "happy case",
            args: `{"presentLabels" : ["foo", "bar"], "absentLabels" : ["baz"], "presentLabelsPreference" : ["foo", "bar"], "absentLabelsPreference" : ["baz"]}`,
            args: config.NodeLabelArgs{
                PresentLabels:           []string{"foo", "bar"},
                AbsentLabels:            []string{"baz"},
                PresentLabelsPreference: []string{"foo", "bar"},
                AbsentLabelsPreference:  []string{"baz"},
            },
        },
        {
            name: "label presence conflict",
            // "bar" exists in both present and absent labels therefore validation should fail.
            args: `{"presentLabels" : ["foo", "bar"], "absentLabels" : ["bar", "baz"], "presentLabelsPreference" : ["foo", "bar"], "absentLabelsPreference" : ["baz"]}`,
            err:  true,
            args: config.NodeLabelArgs{
                PresentLabels:           []string{"foo", "bar"},
                AbsentLabels:            []string{"bar", "baz"},
                PresentLabelsPreference: []string{"foo", "bar"},
                AbsentLabelsPreference:  []string{"baz"},
            },
            err: true,
        },
        {
            name: "label preference conflict",
            // "bar" exists in both present and absent labels preferences therefore validation should fail.
            args: `{"presentLabels" : ["foo", "bar"], "absentLabels" : ["baz"], "presentLabelsPreference" : ["foo", "bar"], "absentLabelsPreference" : ["bar", "baz"]}`,
            err:  true,
            args: config.NodeLabelArgs{
                PresentLabels:           []string{"foo", "bar"},
                AbsentLabels:            []string{"baz"},
                PresentLabelsPreference: []string{"foo", "bar"},
                AbsentLabelsPreference:  []string{"bar", "baz"},
            },
            err: true,
        },
        {
            name: "both label presence and preference conflict",
            args: `{"presentLabels" : ["foo", "bar"], "absentLabels" : ["bar", "baz"], "presentLabelsPreference" : ["foo", "bar"], "absentLabelsPreference" : ["bar", "baz"]}`,
            err:  true,
            args: config.NodeLabelArgs{
                PresentLabels:           []string{"foo", "bar"},
                AbsentLabels:            []string{"bar", "baz"},
                PresentLabelsPreference: []string{"foo", "bar"},
                AbsentLabelsPreference:  []string{"bar", "baz"},
            },
            err: true,
        },
    }
    for _, test := range tests {
        t.Run(test.name, func(t *testing.T) {
            args := &runtime.Unknown{Raw: []byte(test.args)}
            _, err := New(args, nil)
            _, err := New(&test.args, nil)
            if test.err && err == nil {
                t.Fatal("Plugin initialization should fail.")
            }
@@ -73,59 +92,83 @@ func TestNodeLabelFilter(t *testing.T) {
    label := map[string]string{"foo": "any value", "bar": "any value"}
    var pod *v1.Pod
    tests := []struct {
        name    string
        rawArgs string
        res     framework.Code
        name string
        args config.NodeLabelArgs
        res  framework.Code
    }{
        {
            name:    "present label does not match",
            rawArgs: `{"presentLabels" : ["baz"]}`,
            res:     framework.UnschedulableAndUnresolvable,
            name: "present label does not match",
            args: config.NodeLabelArgs{
                PresentLabels: []string{"baz"},
            },
            res: framework.UnschedulableAndUnresolvable,
        },
        {
            name:    "absent label does not match",
            rawArgs: `{"absentLabels" : ["baz"]}`,
            res:     framework.Success,
            name: "absent label does not match",
            args: config.NodeLabelArgs{
                AbsentLabels: []string{"baz"},
            },
            res: framework.Success,
        },
        {
            name:    "one of two present labels matches",
            rawArgs: `{"presentLabels" : ["foo", "baz"]}`,
            res:     framework.UnschedulableAndUnresolvable,
            name: "one of two present labels matches",
            args: config.NodeLabelArgs{
                PresentLabels: []string{"foo", "baz"},
            },
            res: framework.UnschedulableAndUnresolvable,
        },
        {
            name:    "one of two absent labels matches",
            rawArgs: `{"absentLabels" : ["foo", "baz"]}`,
            res:     framework.UnschedulableAndUnresolvable,
            name: "one of two absent labels matches",
            args: config.NodeLabelArgs{
                AbsentLabels: []string{"foo", "baz"},
            },
            res: framework.UnschedulableAndUnresolvable,
        },
        {
            name:    "all present abels match",
            rawArgs: `{"presentLabels" : ["foo", "bar"]}`,
            res:     framework.Success,
            name: "all present labels match",
            args: config.NodeLabelArgs{
                PresentLabels: []string{"foo", "bar"},
            },
            res: framework.Success,
        },
        {
            name:    "all absent labels match",
            rawArgs: `{"absentLabels" : ["foo", "bar"]}`,
            res:     framework.UnschedulableAndUnresolvable,
            name: "all absent labels match",
            args: config.NodeLabelArgs{
                AbsentLabels: []string{"foo", "bar"},
            },
            res: framework.UnschedulableAndUnresolvable,
        },
        {
            name:    "both present and absent label matches",
            rawArgs: `{"presentLabels" : ["foo"], "absentLabels" : ["bar"]}`,
            res:     framework.UnschedulableAndUnresolvable,
            name: "both present and absent label matches",
            args: config.NodeLabelArgs{
                PresentLabels: []string{"foo"},
                AbsentLabels:  []string{"bar"},
            },
            res: framework.UnschedulableAndUnresolvable,
        },
        {
            name:    "neither present nor absent label matches",
            rawArgs: `{"presentLabels" : ["foz"], "absentLabels" : ["baz"]}`,
            res:     framework.UnschedulableAndUnresolvable,
            name: "neither present nor absent label matches",
            args: config.NodeLabelArgs{
                PresentLabels: []string{"foz"},
                AbsentLabels:  []string{"baz"},
            },
            res: framework.UnschedulableAndUnresolvable,
        },
        {
            name:    "present label matches and absent label doesn't match",
            rawArgs: `{"presentLabels" : ["foo"], "absentLabels" : ["baz"]}`,
            res:     framework.Success,
            name: "present label matches and absent label doesn't match",
            args: config.NodeLabelArgs{
                PresentLabels: []string{"foo"},
                AbsentLabels:  []string{"baz"},
            },
            res: framework.Success,
        },
        {
            name:    "present label doesn't match and absent label matches",
            rawArgs: `{"presentLabels" : ["foz"], "absentLabels" : ["bar"]}`,
            res:     framework.UnschedulableAndUnresolvable,
            name: "present label doesn't match and absent label matches",
            args: config.NodeLabelArgs{
                PresentLabels: []string{"foz"},
                AbsentLabels:  []string{"bar"},
            },
            res: framework.UnschedulableAndUnresolvable,
        },
    }

@ -135,8 +178,7 @@ func TestNodeLabelFilter(t *testing.T) {
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(&node)

args := &runtime.Unknown{Raw: []byte(test.rawArgs)}
p, err := New(args, nil)
p, err := New(&test.args, nil)
if err != nil {
t.Fatalf("Failed to create plugin: %v", err)
}
@ -151,74 +193,103 @@ func TestNodeLabelFilter(t *testing.T) {

func TestNodeLabelScore(t *testing.T) {
tests := []struct {
rawArgs string
want int64
name string
args config.NodeLabelArgs
want int64
name string
}{
{
want: framework.MaxNodeScore,
rawArgs: `{"presentLabelsPreference" : ["foo"]}`,
name: "one present label match",
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo"},
},
name: "one present label match",
},
{
want: 0,
rawArgs: `{"presentLabelsPreference" : ["somelabel"]}`,
name: "one present label mismatch",
want: 0,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"somelabel"},
},
name: "one present label mismatch",
},
{
want: framework.MaxNodeScore,
rawArgs: `{"presentLabelsPreference" : ["foo", "bar"]}`,
name: "two present labels match",
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "bar"},
},
name: "two present labels match",
},
{
want: 0,
rawArgs: `{"presentLabelsPreference" : ["somelabel1", "somelabel2"]}`,
name: "two present labels mismatch",
want: 0,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two present labels mismatch",
},
{
want: framework.MaxNodeScore / 2,
rawArgs: `{"presentLabelsPreference" : ["foo", "somelabel"]}`,
name: "two present labels only one matches",
want: framework.MaxNodeScore / 2,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "somelabel"},
},
name: "two present labels only one matches",
},
{
want: 0,
rawArgs: `{"absentLabelsPreference" : ["foo"]}`,
name: "one absent label match",
want: 0,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"foo"},
},
name: "one absent label match",
},
{
want: framework.MaxNodeScore,
rawArgs: `{"absentLabelsPreference" : ["somelabel"]}`,
name: "one absent label mismatch",
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"somelabel"},
},
name: "one absent label mismatch",
},
{
want: 0,
rawArgs: `{"absentLabelsPreference" : ["foo", "bar"]}`,
name: "two absent labels match",
want: 0,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"foo", "bar"},
},
name: "two absent labels match",
},
{
want: framework.MaxNodeScore,
rawArgs: `{"absentLabelsPreference" : ["somelabel1", "somelabel2"]}`,
name: "two absent labels mismatch",
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two absent labels mismatch",
},
{
want: framework.MaxNodeScore / 2,
rawArgs: `{"absentLabelsPreference" : ["foo", "somelabel"]}`,
name: "two absent labels only one matches",
want: framework.MaxNodeScore / 2,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"foo", "somelabel"},
},
name: "two absent labels only one matches",
},
{
want: framework.MaxNodeScore,
rawArgs: `{"presentLabelsPreference" : ["foo", "bar"], "absentLabelsPreference" : ["somelabel1", "somelabel2"]}`,
name: "two present labels match, two absent labels mismatch",
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "bar"},
AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two present labels match, two absent labels mismatch",
},
{
want: 0,
rawArgs: `{"absentLabelsPreference" : ["foo", "bar"], "presentLabelsPreference" : ["somelabel1", "somelabel2"]}`,
name: "two present labels both mismatch, two absent labels both match",
want: 0,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"somelabel1", "somelabel2"},
AbsentLabelsPreference: []string{"foo", "bar"},
},
name: "two present labels both mismatch, two absent labels both match",
},
{
want: 3 * framework.MaxNodeScore / 4,
rawArgs: `{"presentLabelsPreference" : ["foo", "somelabel"], "absentLabelsPreference" : ["somelabel1", "somelabel2"]}`,
name: "two present labels one matches, two absent labels mismatch",
want: 3 * framework.MaxNodeScore / 4,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "somelabel"},
AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two present labels one matches, two absent labels mismatch",
},
}

@ -227,8 +298,7 @@ func TestNodeLabelScore(t *testing.T) {
state := framework.NewCycleState()
node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: map[string]string{"foo": "", "bar": ""}}}
fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, []*v1.Node{node})))
args := &runtime.Unknown{Raw: []byte(test.rawArgs)}
p, err := New(args, fh)
p, err := New(&test.args, fh)
if err != nil {
t.Fatalf("Failed to create plugin: %+v", err)
}

@ -26,7 +26,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -59,15 +58,14 @@ go_test(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)

@ -24,9 +24,9 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

@ -64,16 +64,27 @@ func (f *Fit) Name() string {

// NewFit initializes a new plugin and returns it.
func NewFit(plArgs runtime.Object, _ framework.FrameworkHandle) (framework.Plugin, error) {
args := &schedulerv1alpha2.NodeResourcesFitArgs{}
if err := framework.DecodeInto(plArgs, args); err != nil {
args, err := getFitArgs(plArgs)
if err != nil {
return nil, err
}

fit := &Fit{}
fit.ignoredResources = sets.NewString(args.IgnoredResources...)
fit := &Fit{
ignoredResources: sets.NewString(args.IgnoredResources...),
}
return fit, nil
}

func getFitArgs(obj runtime.Object) (config.NodeResourcesFitArgs, error) {
if obj == nil {
return config.NodeResourcesFitArgs{}, nil
}
ptr, ok := obj.(*config.NodeResourcesFitArgs)
if !ok {
return config.NodeResourcesFitArgs{}, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", obj)
}
return *ptr, nil
}

// computePodResourceRequest returns a framework.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
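
With plugin args now arriving as typed objects, constructing a plugin from code no longer goes through raw JSON. A minimal sketch of the new call pattern, assuming only what the hunk above shows (the resource name is an invented example; NewFit ignores its framework handle):

// Sketch: typed plugin args replace runtime.Unknown JSON blobs.
args := &config.NodeResourcesFitArgs{
	IgnoredResources: []string{"example.com/foo"}, // hypothetical resource name
}
p, err := NewFit(args, nil) // the handle parameter is unused by NewFit
if err != nil {
	// Passing anything other than *config.NodeResourcesFitArgs (or nil)
	// fails here with "want args to be of type NodeResourcesFitArgs, got %T".
}
_ = p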
@ -19,10 +19,11 @@ package noderesources
import (
"context"
"fmt"
"k8s.io/apimachinery/pkg/runtime"
"reflect"
"testing"

"k8s.io/kubernetes/pkg/scheduler/apis/config"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@ -94,7 +95,7 @@ func TestEnoughRequests(t *testing.T) {
pod *v1.Pod
nodeInfo *framework.NodeInfo
name string
ignoredResources []byte
args config.NodeResourcesFitArgs
wantInsufficientResources []InsufficientResource
wantStatus *framework.Status
}{
@ -340,8 +341,10 @@ func TestEnoughRequests(t *testing.T) {
{
pod: newResourcePod(
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
args: config.NodeResourcesFitArgs{
IgnoredResources: []string{"example.com/bbb"},
},
name: "skip checking ignored extended resource",
wantInsufficientResources: []InsufficientResource{},
},
@ -371,8 +374,10 @@ func TestEnoughRequests(t *testing.T) {
node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
test.nodeInfo.SetNode(&node)

args := &runtime.Unknown{Raw: test.ignoredResources}
p, _ := NewFit(args, nil)
p, err := NewFit(&test.args, nil)
if err != nil {
t.Fatal(err)
}
cycleState := framework.NewCycleState()
preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
if !preFilterStatus.IsSuccess() {
@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
@ -49,8 +48,8 @@ type functionShapePoint struct {

// NewRequestedToCapacityRatio initializes a new plugin and returns it.
func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plugin, error) {
args := &schedulerv1alpha2.RequestedToCapacityRatioArgs{}
if err := framework.DecodeInto(plArgs, args); err != nil {
args, err := getRequestedToCapacityRatioArgs(plArgs)
if err != nil {
return nil, err
}

@ -92,6 +91,17 @@ func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Framewo
}, nil
}

func getRequestedToCapacityRatioArgs(obj runtime.Object) (config.RequestedToCapacityRatioArgs, error) {
if obj == nil {
return config.RequestedToCapacityRatioArgs{}, nil
}
ptr, ok := obj.(*config.RequestedToCapacityRatioArgs)
if !ok {
return config.RequestedToCapacityRatioArgs{}, fmt.Errorf("want args to be of type RequestedToCapacityRatioArgs, got %T", obj)
}
return *ptr, nil
}

// RequestedToCapacityRatio is a score plugin that allows users to apply bin packing
// on core resources like CPU, Memory as well as extended resources like accelerators.
type RequestedToCapacityRatio struct {
@ -24,8 +24,7 @@ import (
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)
@ -68,8 +67,17 @@ func TestRequestedToCapacityRatio(t *testing.T) {
state := framework.NewCycleState()
snapshot := cache.NewSnapshot(test.scheduledPods, test.nodes)
fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 10}, {"utilization" : 100, "score" : 0}], "resources" : [{"name" : "memory", "weight" : 1}, {"name" : "cpu", "weight" : 1}]}`)}
p, err := NewRequestedToCapacityRatio(args, fh)
args := config.RequestedToCapacityRatioArgs{
Shape: []config.UtilizationShapePoint{
{Utilization: 0, Score: 10},
{Utilization: 100, Score: 0},
},
Resources: []config.ResourceSpec{
{Name: "memory", Weight: 1},
{Name: "cpu", Weight: 1},
},
}
p, err := NewRequestedToCapacityRatio(&args, fh)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -352,8 +360,16 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
state := framework.NewCycleState()
snapshot := cache.NewSnapshot(test.pods, test.nodes)
fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 1}]}`)}
p, err := NewRequestedToCapacityRatio(args, fh)
args := config.RequestedToCapacityRatioArgs{
Shape: []config.UtilizationShapePoint{
{Utilization: 0, Score: 0},
{Utilization: 100, Score: 1},
},
Resources: []config.ResourceSpec{
{Name: "intel.com/foo", Weight: 1},
},
}
p, err := NewRequestedToCapacityRatio(&args, fh)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -587,8 +603,17 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
state := framework.NewCycleState()
snapshot := cache.NewSnapshot(test.pods, test.nodes)
fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 3}, {"name" : "intel.com/bar", "weight": 5}]}`)}
p, err := NewRequestedToCapacityRatio(args, fh)
args := config.RequestedToCapacityRatioArgs{
Shape: []config.UtilizationShapePoint{
{Utilization: 0, Score: 0},
{Utilization: 100, Score: 1},
},
Resources: []config.ResourceSpec{
{Name: "intel.com/foo", Weight: 3},
{Name: "intel.com/bar", Weight: 5},
},
}
p, err := NewRequestedToCapacityRatio(&args, fh)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -608,49 +633,3 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
})
}
}

// TODO compatibility test once the plugin args move to v1beta1.
// UtilizationShapePoint and ResourceSpec fields of the plugin args struct are not annotated
// with JSON tags in v1alpha2 to maintain backward compatibility with the args shipped with v1.18.
// See https://github.com/kubernetes/kubernetes/pull/88585#discussion_r405021905

func TestPluginArgsJSONEncodingIsCaseInsensitive(t *testing.T) {
rawArgs := &runtime.Unknown{Raw: []byte(`
{
"shape": [{"Utilization": 1, "Score": 1}, {"utilization": 2, "score": 2}],
"resources": [{"Name":"a","Weight":1},{"name":"b","weight":2}]
}
`)}

args := &schedulerv1alpha2.RequestedToCapacityRatioArgs{}
if err := framework.DecodeInto(rawArgs, args); err != nil {
t.Fatalf("expected no error, got: %v", err)
}

expectedArgs := &schedulerv1alpha2.RequestedToCapacityRatioArgs{
Shape: []schedulerv1alpha2.UtilizationShapePoint{
{
Utilization: 1,
Score: 1,
},
{
Utilization: 2,
Score: 2,
},
},
Resources: []schedulerv1alpha2.ResourceSpec{
{
Name: "a",
Weight: 1,
},
{
Name: "b",
Weight: 2,
},
},
}

if !reflect.DeepEqual(expectedArgs, args) {
t.Errorf("expected: \n\t%#v,\ngot: \n\t%#v", expectedArgs, args)
}
}
@ -11,6 +11,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
@ -24,7 +25,6 @@ go_library(
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
@ -38,6 +38,7 @@ go_test(
],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
@ -50,7 +51,6 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
@ -28,7 +28,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
@ -518,7 +518,7 @@ func TestPreFilterState(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0)
pl := PodTopologySpread{
sharedLister: cache.NewSnapshot(tt.existingPods, tt.nodes),
args: schedulerv1alpha2.PodTopologySpreadArgs{
args: config.PodTopologySpreadArgs{
DefaultConstraints: tt.defaultConstraints,
},
}
@ -27,7 +27,7 @@ import (
"k8s.io/client-go/informers"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

@ -42,7 +42,7 @@ var (

// PodTopologySpread is a plugin that ensures pod's topologySpreadConstraints is satisfied.
type PodTopologySpread struct {
args schedulerv1alpha2.PodTopologySpreadArgs
args config.PodTopologySpreadArgs
sharedLister framework.SharedLister
services corelisters.ServiceLister
replicationCtrls corelisters.ReplicationControllerLister
@ -71,17 +71,21 @@ func (pl *PodTopologySpread) BuildArgs() interface{} {
}

// New initializes a new plugin and returns it.
func New(args runtime.Object, h framework.FrameworkHandle) (framework.Plugin, error) {
func New(plArgs runtime.Object, h framework.FrameworkHandle) (framework.Plugin, error) {
if h.SnapshotSharedLister() == nil {
return nil, fmt.Errorf("SnapshotSharedlister is nil")
}
pl := &PodTopologySpread{sharedLister: h.SnapshotSharedLister()}
if err := framework.DecodeInto(args, &pl.args); err != nil {
args, err := getArgs(plArgs)
if err != nil {
return nil, err
}
if err := validateArgs(&pl.args); err != nil {
if err := validateArgs(&args); err != nil {
return nil, err
}
pl := &PodTopologySpread{
sharedLister: h.SnapshotSharedLister(),
args: args,
}
if len(pl.args.DefaultConstraints) != 0 {
if h.SharedInformerFactory() == nil {
return nil, fmt.Errorf("SharedInformerFactory is nil")
@ -91,6 +95,17 @@ func New(args runtime.Object, h framework.FrameworkHandle) (framework.Plugin, er
return pl, nil
}

func getArgs(obj runtime.Object) (config.PodTopologySpreadArgs, error) {
if obj == nil {
return config.PodTopologySpreadArgs{}, nil
}
ptr, ok := obj.(*config.PodTopologySpreadArgs)
if !ok {
return config.PodTopologySpreadArgs{}, fmt.Errorf("want args to be of type PodTopologySpreadArgs, got %T", obj)
}
return *ptr, nil
}

func (pl *PodTopologySpread) setListers(factory informers.SharedInformerFactory) {
pl.services = factory.Core().V1().Services().Lister()
pl.replicationCtrls = factory.Core().V1().ReplicationControllers().Lister()
@ -101,7 +116,7 @@ func (pl *PodTopologySpread) setListers(factory informers.SharedInformerFactory)
// validateArgs replicates the validation from
// pkg/apis/core/validation.validateTopologySpreadConstraints.
// This has the additional check for .labelSelector to be nil.
func validateArgs(args *schedulerv1alpha2.PodTopologySpreadArgs) error {
func validateArgs(args *config.PodTopologySpreadArgs) error {
var allErrs field.ErrorList
path := field.NewPath("defaultConstraints")
for i, c := range args.DefaultConstraints {
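
The same getArgs shape now appears in every plugin this change touches (NodeResourcesFit, RequestedToCapacityRatio, PodTopologySpread, ServiceAffinity). Its contract, sketched from the code above (illustration only, not part of the diff): a nil object yields zero-value defaults, and any other concrete type is rejected by the type assertion.

// Sketch of the shared typed-args contract:
args, err := getArgs(nil)
// args == config.PodTopologySpreadArgs{}, err == nil

_, err = getArgs(&config.ServiceAffinityArgs{})
// err: want args to be of type PodTopologySpreadArgs, got *config.ServiceAffinityArgs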
@ -20,38 +20,25 @@ import (
"strings"
"testing"

"github.com/google/go-cmp/cmp"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

func TestNew(t *testing.T) {
cases := []struct {
name string
args runtime.Unknown
wantErr string
wantArgs schedulerv1alpha2.PodTopologySpreadArgs
name string
args config.PodTopologySpreadArgs
wantErr string
}{
{name: "empty args"},
{
name: "valid constraints",
args: runtime.Unknown{
ContentType: runtime.ContentTypeYAML,
Raw: []byte(`defaultConstraints:
- maxSkew: 1
topologyKey: "node"
whenUnsatisfiable: "ScheduleAnyway"
- maxSkew: 5
topologyKey: "zone"
whenUnsatisfiable: "DoNotSchedule"
`),
},
wantArgs: schedulerv1alpha2.PodTopologySpreadArgs{
args: config.PodTopologySpreadArgs{
DefaultConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
@ -68,66 +55,75 @@ func TestNew(t *testing.T) {
},
{
name: "repeated constraints",
args: runtime.Unknown{
ContentType: runtime.ContentTypeYAML,
Raw: []byte(`defaultConstraints:
- maxSkew: 1
topologyKey: "node"
whenUnsatisfiable: "ScheduleAnyway"
- maxSkew: 5
topologyKey: "node"
whenUnsatisfiable: "ScheduleAnyway"
`),
args: config.PodTopologySpreadArgs{
DefaultConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "node",
WhenUnsatisfiable: v1.ScheduleAnyway,
},
{
MaxSkew: 5,
TopologyKey: "node",
WhenUnsatisfiable: v1.ScheduleAnyway,
},
},
},
wantErr: "Duplicate value",
},
{
name: "unknown whenUnsatisfiable",
args: runtime.Unknown{
ContentType: runtime.ContentTypeYAML,
Raw: []byte(`defaultConstraints:
- maxSkew: 1
topologyKey: "node"
whenUnsatisfiable: "Unknown"
`),
args: config.PodTopologySpreadArgs{
DefaultConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "node",
WhenUnsatisfiable: "Unknown",
},
},
},
wantErr: "Unsupported value",
},
{
name: "negative maxSkew",
args: runtime.Unknown{
ContentType: runtime.ContentTypeYAML,
Raw: []byte(`defaultConstraints:
- maxSkew: -1
topologyKey: "node"
whenUnsatisfiable: "ScheduleAnyway"
`),
args: config.PodTopologySpreadArgs{
DefaultConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: -1,
TopologyKey: "node",
WhenUnsatisfiable: v1.ScheduleAnyway,
},
},
},
wantErr: "must be greater than zero",
},
{
name: "empty topologyKey",
args: runtime.Unknown{
ContentType: runtime.ContentTypeYAML,
Raw: []byte(`defaultConstraints:
- maxSkew: 1
whenUnsatisfiable: "ScheduleAnyway"
`),
args: config.PodTopologySpreadArgs{
DefaultConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
WhenUnsatisfiable: v1.ScheduleAnyway,
},
},
},
wantErr: "can not be empty",
},
{
name: "with label selector",
args: runtime.Unknown{
ContentType: runtime.ContentTypeYAML,
Raw: []byte(`defaultConstraints:
- maxSkew: 1
topologyKey: "rack"
whenUnsatisfiable: "ScheduleAnyway"
labelSelector:
matchLabels:
foo: "bar"
`),
args: config.PodTopologySpreadArgs{
DefaultConstraints: []v1.TopologySpreadConstraint{
{
MaxSkew: 1,
TopologyKey: "rack",
WhenUnsatisfiable: v1.ScheduleAnyway,
LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"foo": "bar",
},
},
},
},
},
wantErr: "constraint must not define a selector",
},
@ -142,7 +138,7 @@ func TestNew(t *testing.T) {
if err != nil {
t.Fatal(err)
}
pl, err := New(&tc.args, f)
_, err = New(&tc.args, f)
if len(tc.wantErr) != 0 {
if err == nil || !strings.Contains(err.Error(), tc.wantErr) {
t.Errorf("must fail, got error %q, want %q", err, tc.wantErr)
@ -152,10 +148,6 @@ func TestNew(t *testing.T) {
if err != nil {
t.Fatal(err)
}
plObj := pl.(*PodTopologySpread)
if diff := cmp.Diff(tc.wantArgs, plObj.BuildArgs()); diff != "" {
t.Errorf("wrong plugin build args (-want,+got):\n%s", diff)
}
})
}
}
@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
@ -195,7 +195,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0)
pl := PodTopologySpread{
sharedLister: cache.NewSnapshot(nil, tt.nodes),
args: schedulerv1alpha2.PodTopologySpreadArgs{
args: config.PodTopologySpreadArgs{
DefaultConstraints: tt.defaultConstraints,
},
}
@ -729,7 +729,7 @@ func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) {
snapshot := cache.NewSnapshot(existingPods, allNodes)
p := &PodTopologySpread{
sharedLister: snapshot,
args: schedulerv1alpha2.PodTopologySpreadArgs{
args: config.PodTopologySpreadArgs{
DefaultConstraints: []v1.TopologySpreadConstraint{
{MaxSkew: 1, TopologyKey: v1.LabelHostname, WhenUnsatisfiable: v1.ScheduleAnyway},
{MaxSkew: 1, TopologyKey: v1.LabelZoneFailureDomain, WhenUnsatisfiable: v1.ScheduleAnyway},
@ -6,13 +6,13 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
],
)

@ -21,12 +21,12 @@ go_test(
srcs = ["service_affinity_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/framework/v1alpha1/fake:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/kube-scheduler/config/v1alpha2:go_default_library",
],
)

@ -24,7 +24,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
corelisters "k8s.io/client-go/listers/core/v1"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
@ -64,12 +64,11 @@ func (s *preFilterState) Clone() framework.StateData {

// New initializes a new plugin and returns it.
func New(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plugin, error) {
args := schedulerv1alpha2.ServiceAffinityArgs{}
if err := framework.DecodeInto(plArgs, &args); err != nil {
args, err := getArgs(plArgs)
if err != nil {
return nil, err
}
informerFactory := handle.SharedInformerFactory()
serviceLister := informerFactory.Core().V1().Services().Lister()
serviceLister := handle.SharedInformerFactory().Core().V1().Services().Lister()

return &ServiceAffinity{
sharedLister: handle.SnapshotSharedLister(),
@ -78,9 +77,20 @@ func New(plArgs runtime.Object, handle framework.FrameworkHandle) (framework.Plu
}, nil
}

func getArgs(obj runtime.Object) (config.ServiceAffinityArgs, error) {
if obj == nil {
return config.ServiceAffinityArgs{}, nil
}
ptr, ok := obj.(*config.ServiceAffinityArgs)
if !ok {
return config.ServiceAffinityArgs{}, fmt.Errorf("want args to be of type ServiceAffinityArgs, got %T", obj)
}
return *ptr, nil
}

// ServiceAffinity is a plugin that checks service affinity.
type ServiceAffinity struct {
args schedulerv1alpha2.ServiceAffinityArgs
args config.ServiceAffinityArgs
sharedLister framework.SharedLister
serviceLister corelisters.ServiceLister
}
@ -24,7 +24,7 @@ import (

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
schedulerv1alpha2 "k8s.io/kube-scheduler/config/v1alpha2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
@ -165,7 +165,7 @@ func TestServiceAffinity(t *testing.T) {
p := &ServiceAffinity{
sharedLister: snapshot,
serviceLister: fakeframework.ServiceLister(test.services),
args: schedulerv1alpha2.ServiceAffinityArgs{
args: config.ServiceAffinityArgs{
AffinityLabels: test.labels,
},
}
@ -389,7 +389,7 @@ func TestServiceAffinityScore(t *testing.T) {
p := &ServiceAffinity{
sharedLister: snapshot,
serviceLister: serviceLister,
args: schedulerv1alpha2.ServiceAffinityArgs{
args: config.ServiceAffinityArgs{
AntiAffinityLabelsPreference: test.labels,
},
}
@ -606,7 +606,7 @@ func TestPreFilterDisabled(t *testing.T) {
node := v1.Node{}
nodeInfo.SetNode(&node)
p := &ServiceAffinity{
args: schedulerv1alpha2.ServiceAffinityArgs{
args: config.ServiceAffinityArgs{
AffinityLabels: []string{"region"},
},
}
@ -19,6 +19,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/component-base/config/v1alpha1:go_default_library",
"//staging/src/k8s.io/kube-scheduler/config/v1:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
],
)

@ -17,10 +17,14 @@ limitations under the License.
package v1alpha2

import (
"bytes"
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1"
v1 "k8s.io/kube-scheduler/config/v1"
"sigs.k8s.io/yaml"
)

const (
@ -73,17 +77,17 @@ type KubeSchedulerConfiguration struct {
// Duration to wait for a binding operation to complete before timing out
// Value must be non-negative integer. The value zero indicates no waiting.
// If this value is nil, the default value will be used.
BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds"`
BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"`

// PodInitialBackoffSeconds is the initial backoff for unschedulable pods.
// If specified, it must be greater than 0. If this value is null, the default value (1s)
// will be used.
PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds"`
PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds,omitempty"`

// PodMaxBackoffSeconds is the max backoff for unschedulable pods.
// If specified, it must be greater than podInitialBackoffSeconds. If this value is null,
// the default value (10s) will be used.
PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds"`
PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds,omitempty"`

// Profiles are scheduling profiles that kube-scheduler supports. Pods can
// choose to be scheduled under a particular profile by setting its associated
@ -91,12 +95,40 @@ type KubeSchedulerConfiguration struct {
// with the "default-scheduler" profile, if present here.
// +listType=map
// +listMapKey=schedulerName
Profiles []KubeSchedulerProfile `json:"profiles"`
Profiles []KubeSchedulerProfile `json:"profiles,omitempty"`

// Extenders are the list of scheduler extenders, each holding the values of how to communicate
// with the extender. These extenders are shared by all scheduler profiles.
// +listType=set
Extenders []v1.Extender `json:"extenders"`
Extenders []v1.Extender `json:"extenders,omitempty"`
}

// DecodeNestedObjects decodes plugin args for known types.
func (c *KubeSchedulerConfiguration) DecodeNestedObjects(d runtime.Decoder) error {
for i := range c.Profiles {
prof := &c.Profiles[i]
for j := range prof.PluginConfig {
err := prof.PluginConfig[j].decodeNestedObjects(d)
if err != nil {
return fmt.Errorf("decoding .profiles[%d].pluginConfig[%d]: %w", i, j, err)
}
}
}
return nil
}

// EncodeNestedObjects encodes plugin args.
func (c *KubeSchedulerConfiguration) EncodeNestedObjects(e runtime.Encoder) error {
for i := range c.Profiles {
prof := &c.Profiles[i]
for j := range prof.PluginConfig {
err := prof.PluginConfig[j].encodeNestedObjects(e)
if err != nil {
return fmt.Errorf("encoding .profiles[%d].pluginConfig[%d]: %w", i, j, err)
}
}
}
return nil
}

// KubeSchedulerProfile is a scheduling profile.
@ -202,3 +234,41 @@ type PluginConfig struct {
// Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure.
Args runtime.RawExtension `json:"args,omitempty"`
}

func (c *PluginConfig) decodeNestedObjects(d runtime.Decoder) error {
gvk := SchemeGroupVersion.WithKind(c.Name + "Args")
// dry-run to detect and skip out-of-tree plugin args.
if _, _, err := d.Decode(nil, &gvk, nil); runtime.IsNotRegisteredError(err) {
return nil
}

obj, parsedGvk, err := d.Decode(c.Args.Raw, &gvk, nil)
if err != nil {
return fmt.Errorf("decoding args for plugin %s: %w", c.Name, err)
}
if parsedGvk.GroupKind() != gvk.GroupKind() {
return fmt.Errorf("args for plugin %s were not of type %s, got %s", c.Name, gvk.GroupKind(), parsedGvk.GroupKind())
}
c.Args.Object = obj
return nil
}

func (c *PluginConfig) encodeNestedObjects(e runtime.Encoder) error {
if c.Args.Object == nil {
return nil
}
var buf bytes.Buffer
err := e.Encode(c.Args.Object, &buf)
if err != nil {
return err
}
// The <e> encoder might be a YAML encoder, but the parent encoder expects
// JSON output, so we convert YAML back to JSON.
// This is a no-op if <e> produces JSON.
json, err := yaml.YAMLToJSON(buf.Bytes())
if err != nil {
return err
}
c.Args.Raw = json
return nil
}
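
These two hooks are what make typed args flow through ordinary codec round-trips: apimachinery's versioning codec calls DecodeNestedObjects and EncodeNestedObjects on objects that implement its nested-object interfaces, and the dry-run Decode(nil, &gvk, nil) above lets args for out-of-tree plugins (whose <Name>Args kind is not registered in the scheme) pass through as raw bytes. A compile-time sketch of that relationship; the interface names come from k8s.io/apimachinery/pkg/runtime and are an assumption on my part, not part of this diff:

// Sketch: assert the hooks satisfy apimachinery's nested-object interfaces,
// so decoding a KubeSchedulerConfiguration also decodes each PluginConfig.Args.
var _ runtime.NestedObjectDecoder = &KubeSchedulerConfiguration{}
var _ runtime.NestedObjectEncoder = &KubeSchedulerConfiguration{}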
@ -17,6 +17,8 @@ limitations under the License.
package v1alpha2

import (
gojson "encoding/json"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -95,7 +97,7 @@ type RequestedToCapacityRatioArgs struct {
Resources []ResourceSpec `json:"resources,omitempty"`
}

// TODO add JSON tags and backward compatible conversion in v1beta1.
// TODO add JSON tags and remove custom unmarshalling in v1beta1.
// UtilizationShapePoint and ResourceSpec fields are not annotated with JSON tags in v1alpha2
// to maintain backward compatibility with the args shipped with v1.18.
// See https://github.com/kubernetes/kubernetes/pull/88585#discussion_r405021905
@ -108,6 +110,13 @@ type UtilizationShapePoint struct {
Score int32
}

// UnmarshalJSON provides case insensitive unmarshalling for the type.
// TODO remove when copying to v1beta1.
func (t *UtilizationShapePoint) UnmarshalJSON(data []byte) error {
type internal *UtilizationShapePoint
return gojson.Unmarshal(data, internal(t))
}

// ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments.
type ResourceSpec struct {
// Name of the resource to be managed by RequestedToCapacityRatio function.
@ -116,6 +125,13 @@ type ResourceSpec struct {
Weight int64
}

// UnmarshalJSON provides case insensitive unmarshalling for the type.
// TODO remove when copying to v1beta1.
func (t *ResourceSpec) UnmarshalJSON(data []byte) error {
type internal *ResourceSpec
return gojson.Unmarshal(data, internal(t))
}

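The type internal *ResourceSpec conversion is what keeps these methods from recursing: a defined pointer type has an empty method set, so encoding/json no longer finds the custom UnmarshalJSON and falls back to its default struct decoding, which matches keys case-insensitively. The scheduler's own serializer is stricter about key case (the motivation referenced in the TODO above), so delegating to the standard library keeps accepting the lowercase keys shipped with v1.18. A self-contained sketch of the effect; this snippet is illustrative and not part of the diff:

package main

import (
	"encoding/json"
	"fmt"
)

type ResourceSpec struct {
	Name   string
	Weight int64
}

// UnmarshalJSON delegates to encoding/json through a method-less pointer
// type, avoiding infinite recursion while keeping lenient key matching.
func (t *ResourceSpec) UnmarshalJSON(data []byte) error {
	type internal *ResourceSpec
	return json.Unmarshal(data, internal(t))
}

func main() {
	var a, b ResourceSpec
	_ = json.Unmarshal([]byte(`{"Name":"cpu","Weight":2}`), &a)
	_ = json.Unmarshal([]byte(`{"name":"cpu","weight":2}`), &b)
	fmt.Println(a == b) // true: both key spellings decode identically
}
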
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceAffinityArgs holds arguments used to configure the ServiceAffinity plugin.

@ -9,6 +9,7 @@ require (
k8s.io/api v0.0.0
k8s.io/apimachinery v0.0.0
k8s.io/component-base v0.0.0
sigs.k8s.io/yaml v1.2.0
)

replace (