Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
deprecate ResourceLimitsPriorityFunction feature gate in the scheduler
Signed-off-by: SataQiu <1527062125@qq.com>
This commit is contained in:
parent: b93e9d9395
commit: 932e61aefa
@@ -193,12 +193,6 @@ const (
 	// Postpone deletion of a PV or a PVC when they are being used
 	StorageObjectInUseProtection featuregate.Feature = "StorageObjectInUseProtection"
 
-	// owner: @aveshagarwal
-	// alpha: v1.9
-	//
-	// Enable resource limits priority function
-	ResourceLimitsPriorityFunction featuregate.Feature = "ResourceLimitsPriorityFunction"
-
 	// owner: @m1093782566
 	// GA: v1.11
 	//

@@ -611,7 +605,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	CSINodeInfo:                    {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
 	BlockVolume:                    {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
 	StorageObjectInUseProtection:   {Default: true, PreRelease: featuregate.GA},
-	ResourceLimitsPriorityFunction: {Default: false, PreRelease: featuregate.Alpha},
 	SupportIPVSProxyMode:           {Default: true, PreRelease: featuregate.GA},
 	SupportPodPidsLimit:            {Default: true, PreRelease: featuregate.Beta},
 	SupportNodePidsLimit:           {Default: true, PreRelease: featuregate.Beta},
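For readers unfamiliar with the gate being removed here: feature gates in Kubernetes components are declared with k8s.io/component-base/featuregate. Below is a minimal, self-contained sketch of declaring and checking a gate like the one deleted above; the standalone wiring is illustrative only, not the real pkg/features plumbing.

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

// Same name and spec as the map entry removed from pkg/features above.
const ResourceLimitsPriorityFunction featuregate.Feature = "ResourceLimitsPriorityFunction"

func main() {
	gate := featuregate.NewFeatureGate()
	// Register the gate with its default, mirroring the deleted entry.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		ResourceLimitsPriorityFunction: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	// Programmatic equivalent of passing
	// --feature-gates=ResourceLimitsPriorityFunction=true on the command line.
	if err := gate.Set("ResourceLimitsPriorityFunction=true"); err != nil {
		panic(err)
	}
	fmt.Println("enabled:", gate.Enabled(ResourceLimitsPriorityFunction))
}
```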
@@ -11,7 +11,6 @@ go_library(
     srcs = ["registry.go"],
     importpath = "k8s.io/kubernetes/pkg/scheduler/algorithmprovider",
     deps = [
-        "//pkg/features:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/plugins/defaultbinder:go_default_library",
         "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library",

@@ -30,8 +29,6 @@ go_library(
         "//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
         "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
         "//pkg/scheduler/framework/plugins/volumezone:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
 
@@ -40,7 +37,6 @@ go_test(
     srcs = ["registry_test.go"],
     embed = [":go_default_library"],
     deps = [
-        "//pkg/features:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/plugins/defaultbinder:go_default_library",
         "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library",

@@ -59,8 +55,6 @@ go_test(
         "//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
         "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
         "//pkg/scheduler/framework/plugins/volumezone:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
-        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/github.com/google/go-cmp/cmp:go_default_library",
     ],
 )
@@ -20,9 +20,6 @@ import (
 	"sort"
 	"strings"
 
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"

@@ -52,10 +49,7 @@ type Registry map[string]*schedulerapi.Plugins
 // NewRegistry returns an algorithm provider registry instance.
 func NewRegistry() Registry {
 	defaultConfig := getDefaultConfig()
-	applyFeatureGates(defaultConfig)
-
 	caConfig := getClusterAutoscalerConfig()
-	applyFeatureGates(caConfig)
 
 	return Registry{
 		schedulerapi.SchedulerDefaultProviderName: defaultConfig,

@@ -170,14 +164,3 @@ func getClusterAutoscalerConfig() *schedulerapi.Plugins {
 	}
 	return caConfig
 }
-
-func applyFeatureGates(config *schedulerapi.Plugins) {
-	// Prioritizes nodes that satisfy pod's resource limits
-	if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
-		klog.Infof("Registering resourcelimits priority function")
-		s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName}
-		config.PreScore.Enabled = append(config.PreScore.Enabled, s)
-		s = schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
-		config.Score.Enabled = append(config.Score.Enabled, s)
-	}
-}
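The deleted applyFeatureGates was the only place the default provider's plugin lists were mutated based on a gate. A minimal sketch of that append pattern follows; Plugin, PluginSet, and Plugins here are simplified local stand-ins for the schedulerapi types, not the real ones.

```go
package main

import "fmt"

// Simplified stand-ins for schedulerapi.Plugin, PluginSet and Plugins.
type Plugin struct {
	Name   string
	Weight int32
}

type PluginSet struct{ Enabled []Plugin }

type Plugins struct {
	PreScore *PluginSet
	Score    *PluginSet
}

func main() {
	cfg := &Plugins{PreScore: &PluginSet{}, Score: &PluginSet{}}
	gateEnabled := true // stands in for utilfeature.DefaultFeatureGate.Enabled(...)

	if gateEnabled {
		// Mirrors the removed body: the plugin is wired into two
		// extension points, and only the Score entry carries a weight.
		cfg.PreScore.Enabled = append(cfg.PreScore.Enabled, Plugin{Name: "NodeResourceLimits"})
		cfg.Score.Enabled = append(cfg.Score.Enabled, Plugin{Name: "NodeResourceLimits", Weight: 1})
	}
	fmt.Printf("PreScore: %+v\nScore: %+v\n", cfg.PreScore.Enabled, cfg.Score.Enabled)
}
```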
@@ -21,9 +21,6 @@ import (
 
 	"github.com/google/go-cmp/cmp"
 
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
@@ -132,194 +129,3 @@ func TestClusterAutoscalerProvider(t *testing.T) {
 		t.Errorf("unexpected config diff (-want, +got): %s", diff)
 	}
 }
-
-func TestApplyFeatureGates(t *testing.T) {
-	tests := []struct {
-		name            string
-		featuresEnabled bool
-		wantConfig      *schedulerapi.Plugins
-	}{
-		{
-			name:            "Feature gates disabled",
-			featuresEnabled: false,
-			wantConfig: &schedulerapi.Plugins{
-				QueueSort: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: queuesort.Name},
-					},
-				},
-				PreFilter: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: noderesources.FitName},
-						{Name: nodeports.Name},
-						{Name: podtopologyspread.Name},
-						{Name: interpodaffinity.Name},
-					},
-				},
-				Filter: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: nodeunschedulable.Name},
-						{Name: noderesources.FitName},
-						{Name: nodename.Name},
-						{Name: nodeports.Name},
-						{Name: nodeaffinity.Name},
-						{Name: volumerestrictions.Name},
-						{Name: tainttoleration.Name},
-						{Name: nodevolumelimits.EBSName},
-						{Name: nodevolumelimits.GCEPDName},
-						{Name: nodevolumelimits.CSIName},
-						{Name: nodevolumelimits.AzureDiskName},
-						{Name: volumebinding.Name},
-						{Name: volumezone.Name},
-						{Name: podtopologyspread.Name},
-						{Name: interpodaffinity.Name},
-					},
-				},
-				PreScore: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: interpodaffinity.Name},
-						{Name: podtopologyspread.Name},
-						{Name: defaultpodtopologyspread.Name},
-						{Name: tainttoleration.Name},
-					},
-				},
-				Score: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: noderesources.BalancedAllocationName, Weight: 1},
-						{Name: imagelocality.Name, Weight: 1},
-						{Name: interpodaffinity.Name, Weight: 1},
-						{Name: noderesources.LeastAllocatedName, Weight: 1},
-						{Name: nodeaffinity.Name, Weight: 1},
-						{Name: nodepreferavoidpods.Name, Weight: 10000},
-						{Name: podtopologyspread.Name, Weight: 2},
-						{Name: defaultpodtopologyspread.Name, Weight: 1},
-						{Name: tainttoleration.Name, Weight: 1},
-					},
-				},
-				Reserve: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-				Unreserve: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-				PreBind: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-				Bind: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: defaultbinder.Name},
-					},
-				},
-				PostBind: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-			},
-		},
-		{
-			name:            "Feature gates enabled",
-			featuresEnabled: true,
-			wantConfig: &schedulerapi.Plugins{
-				QueueSort: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: queuesort.Name},
-					},
-				},
-				PreFilter: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: noderesources.FitName},
-						{Name: nodeports.Name},
-						{Name: podtopologyspread.Name},
-						{Name: interpodaffinity.Name},
-					},
-				},
-				Filter: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: nodeunschedulable.Name},
-						{Name: noderesources.FitName},
-						{Name: nodename.Name},
-						{Name: nodeports.Name},
-						{Name: nodeaffinity.Name},
-						{Name: volumerestrictions.Name},
-						{Name: tainttoleration.Name},
-						{Name: nodevolumelimits.EBSName},
-						{Name: nodevolumelimits.GCEPDName},
-						{Name: nodevolumelimits.CSIName},
-						{Name: nodevolumelimits.AzureDiskName},
-						{Name: volumebinding.Name},
-						{Name: volumezone.Name},
-						{Name: podtopologyspread.Name},
-						{Name: interpodaffinity.Name},
-					},
-				},
-				PreScore: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: interpodaffinity.Name},
-						{Name: podtopologyspread.Name},
-						{Name: defaultpodtopologyspread.Name},
-						{Name: tainttoleration.Name},
-						{Name: noderesources.ResourceLimitsName},
-					},
-				},
-				Score: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: noderesources.BalancedAllocationName, Weight: 1},
-						{Name: imagelocality.Name, Weight: 1},
-						{Name: interpodaffinity.Name, Weight: 1},
-						{Name: noderesources.LeastAllocatedName, Weight: 1},
-						{Name: nodeaffinity.Name, Weight: 1},
-						{Name: nodepreferavoidpods.Name, Weight: 10000},
-						{Name: podtopologyspread.Name, Weight: 2},
-						{Name: defaultpodtopologyspread.Name, Weight: 1},
-						{Name: tainttoleration.Name, Weight: 1},
-						{Name: noderesources.ResourceLimitsName, Weight: 1},
-					},
-				},
-				Reserve: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-				Unreserve: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-				PreBind: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-				Bind: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: defaultbinder.Name},
-					},
-				},
-				PostBind: &schedulerapi.PluginSet{
-					Enabled: []schedulerapi.Plugin{
-						{Name: volumebinding.Name},
-					},
-				},
-			},
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ResourceLimitsPriorityFunction, test.featuresEnabled)()
-
-			r := NewRegistry()
-			gotConfig := r[schedulerapi.SchedulerDefaultProviderName]
-			if diff := cmp.Diff(test.wantConfig, gotConfig); diff != "" {
-				t.Errorf("unexpected config diff (-want, +got): %s", diff)
-			}
-		})
-	}
-}
@@ -8,7 +8,6 @@ go_test(
     ],
     deps = [
         "//pkg/apis/core/install:go_default_library",
-        "//pkg/features:go_default_library",
         "//pkg/scheduler:go_default_library",
         "//pkg/scheduler/algorithmprovider:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
@@ -31,7 +31,6 @@ import (
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler"
 	"k8s.io/kubernetes/pkg/scheduler/algorithmprovider"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -1319,34 +1318,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			Ignorable: true,
 		}},
 	},
-	{
-		name: "enable alpha feature ResourceLimitsPriorityFunction",
-		JSON: `{
-			"kind": "Policy",
-			"apiVersion": "v1",
-			"predicates": [],
-			"priorities": [
-				{"name": "ResourceLimitsPriority", "weight": 2}
-			]
-		}`,
-		featureGates: map[featuregate.Feature]bool{
-			features.ResourceLimitsPriorityFunction: true,
-		},
-		wantPlugins: map[string][]config.Plugin{
-			"QueueSortPlugin": {{Name: "PrioritySort"}},
-			"PreScorePlugin": {
-				{Name: "NodeResourceLimits"},
-			},
-			"FilterPlugin": {
-				{Name: "NodeUnschedulable"},
-				{Name: "TaintToleration"},
-			},
-			"ScorePlugin": {
-				{Name: "NodeResourceLimits", Weight: 2},
-			},
-			"BindPlugin": {{Name: "DefaultBinder"}},
-		},
-	},
 }
 for _, tc := range testcases {
 	t.Run(tc.name, func(t *testing.T) {
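The removed compatibility case exercised the legacy v1 Policy path, where a JSON policy names predicates and priorities and the scheduler translates each named priority into framework plugins. A hedged sketch of that translation for the deleted case, using plain local types rather than the real Policy structs:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for the Policy fields used in the removed test case.
type policy struct {
	Kind       string     `json:"kind"`
	APIVersion string     `json:"apiVersion"`
	Priorities []priority `json:"priorities"`
}

type priority struct {
	Name   string `json:"name"`
	Weight int32  `json:"weight"`
}

func main() {
	raw := `{
		"kind": "Policy",
		"apiVersion": "v1",
		"predicates": [],
		"priorities": [{"name": "ResourceLimitsPriority", "weight": 2}]
	}`
	var p policy
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	// The legacy registry expanded "ResourceLimitsPriority" into the
	// NodeResourceLimits PreScore plugin plus a Score plugin carrying the
	// policy weight (2 in the deleted test case's wantPlugins).
	for _, pr := range p.Priorities {
		fmt.Printf("%s -> ScorePlugin{Name: NodeResourceLimits, Weight: %d}\n", pr.Name, pr.Weight)
	}
}
```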
@@ -9,7 +9,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/features:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/plugins/defaultbinder:go_default_library",
         "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library",

@@ -32,7 +31,6 @@ go_library(
         "//pkg/scheduler/framework/plugins/volumezone:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
     ],
 )
@@ -18,10 +18,8 @@ package plugins
 
 import (
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 
 	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
@@ -75,8 +73,6 @@ const (
 	// ImageLocalityPriority defines the name of prioritizer function that prioritizes nodes that have images
 	// requested by the pod present.
 	ImageLocalityPriority = "ImageLocalityPriority"
-	// ResourceLimitsPriority defines the nodes of prioritizer function ResourceLimitsPriority.
-	ResourceLimitsPriority = "ResourceLimitsPriority"
 	// EvenPodsSpreadPriority defines the name of prioritizer function that prioritizes nodes
 	// which have pods and labels matching the incoming pod's topologySpreadConstraints.
 	EvenPodsSpreadPriority = "EvenPodsSpreadPriority"
@@ -442,21 +438,6 @@ func NewLegacyRegistry() *LegacyRegistry {
 			return
 		})
 
-	// ResourceLimits is the last feature to be supported as predicate/priority.
-	// TODO: Remove this check once it graduates to GA.
-	// Prioritizes nodes that satisfy pod's resource limits.
-	if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
-		klog.Infof("Registering resourcelimits priority function")
-
-		registry.registerPriorityConfigProducer(ResourceLimitsPriority,
-			func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
-				plugins.PreScore = appendToPluginSet(plugins.PreScore, noderesources.ResourceLimitsName, nil)
-				plugins.Score = appendToPluginSet(plugins.Score, noderesources.ResourceLimitsName, &args.Weight)
-				return
-			})
-		registry.DefaultPriorities[ResourceLimitsPriority] = 1
-	}
-
 	return registry
 }
 
@@ -9,7 +9,6 @@ go_library(
         "most_allocated.go",
         "requested_to_capacity_ratio.go",
         "resource_allocation.go",
-        "resource_limits.go",
         "test_util.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",

@@ -53,7 +52,6 @@ go_test(
         "least_allocated_test.go",
         "most_allocated_test.go",
         "requested_to_capacity_ratio_test.go",
-        "resource_limits_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
@@ -1,160 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package noderesources
-
-import (
-	"context"
-	"fmt"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-)
-
-// ResourceLimits is a score plugin that increases score of input node by 1 if the node satisfies
-// input pod's resource limits
-type ResourceLimits struct {
-	handle framework.FrameworkHandle
-}
-
-var _ = framework.PreScorePlugin(&ResourceLimits{})
-var _ = framework.ScorePlugin(&ResourceLimits{})
-
-const (
-	// ResourceLimitsName is the name of the plugin used in the plugin registry and configurations.
-	ResourceLimitsName = "NodeResourceLimits"
-
-	// preScoreStateKey is the key in CycleState to NodeResourceLimits pre-computed data.
-	// Using the name of the plugin will likely help us avoid collisions with other plugins.
-	preScoreStateKey = "PreScore" + ResourceLimitsName
-)
-
-// preScoreState computed at PreScore and used at Score.
-type preScoreState struct {
-	podResourceRequest *framework.Resource
-}
-
-// Clone the preScore state.
-func (s *preScoreState) Clone() framework.StateData {
-	return s
-}
-
-// Name returns name of the plugin. It is used in logs, etc.
-func (rl *ResourceLimits) Name() string {
-	return ResourceLimitsName
-}
-
-// PreScore builds and writes cycle state used by Score and NormalizeScore.
-func (rl *ResourceLimits) PreScore(
-	pCtx context.Context,
-	cycleState *framework.CycleState,
-	pod *v1.Pod,
-	nodes []*v1.Node,
-) *framework.Status {
-	if len(nodes) == 0 {
-		// No nodes to score.
-		return nil
-	}
-
-	if rl.handle.SnapshotSharedLister() == nil {
-		return framework.NewStatus(framework.Error, fmt.Sprintf("empty shared lister"))
-	}
-	s := &preScoreState{
-		podResourceRequest: getResourceLimits(pod),
-	}
-	cycleState.Write(preScoreStateKey, s)
-	return nil
-}
-
-func getPodResource(cycleState *framework.CycleState) (*framework.Resource, error) {
-	c, err := cycleState.Read(preScoreStateKey)
-	if err != nil {
-		return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err)
-	}
-
-	s, ok := c.(*preScoreState)
-	if !ok {
-		return nil, fmt.Errorf("%+v convert to ResourceLimits.preScoreState error", c)
-	}
-	return s.podResourceRequest, nil
-}
-
-// Score invoked at the Score extension point.
-// The "score" returned in this function is the matching number of pods on the `nodeName`.
-// Currently works as follows:
-// If a node does not publish its allocatable resources (cpu and memory both), the node score is not affected.
-// If a pod does not specify its cpu and memory limits both, the node score is not affected.
-// If one or both of cpu and memory limits of the pod are satisfied, the node is assigned a score of 1.
-// Rationale of choosing the lowest score of 1 is that this is mainly selected to break ties between nodes that have
-// same scores assigned by one of least and most requested priority functions.
-func (rl *ResourceLimits) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
-	nodeInfo, err := rl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
-	if err != nil || nodeInfo.Node() == nil {
-		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v, node is nil: %v", nodeName, err, nodeInfo.Node() == nil))
-	}
-
-	podLimits, err := getPodResource(state)
-	if err != nil {
-		return 0, framework.NewStatus(framework.Error, err.Error())
-	}
-
-	cpuScore := computeScore(podLimits.MilliCPU, nodeInfo.Allocatable.MilliCPU)
-	memScore := computeScore(podLimits.Memory, nodeInfo.Allocatable.Memory)
-
-	score := int64(0)
-	if cpuScore == 1 || memScore == 1 {
-		score = 1
-	}
-	return score, nil
-}
-
-// ScoreExtensions of the Score plugin.
-func (rl *ResourceLimits) ScoreExtensions() framework.ScoreExtensions {
-	return nil
-}
-
-// NewResourceLimits initializes a new plugin and returns it.
-func NewResourceLimits(_ runtime.Object, h framework.FrameworkHandle) (framework.Plugin, error) {
-	return &ResourceLimits{handle: h}, nil
-}
-
-// getResourceLimits computes resource limits for input pod.
-// The reason to create this new function is to be consistent with other
-// priority functions because most or perhaps all priority functions work
-// with framework.Resource.
-func getResourceLimits(pod *v1.Pod) *framework.Resource {
-	result := &framework.Resource{}
-	for _, container := range pod.Spec.Containers {
-		result.Add(container.Resources.Limits)
-	}
-
-	// take max_resource(sum_pod, any_init_container)
-	for _, container := range pod.Spec.InitContainers {
-		result.SetMaxResource(container.Resources.Limits)
-	}
-
-	return result
-}
-
-// computeScore returns 1 if limit value is less than or equal to allocatable
-// value, otherwise it returns 0.
-func computeScore(limit, allocatable int64) int64 {
-	if limit != 0 && allocatable != 0 && limit <= allocatable {
-		return 1
-	}
-	return 0
-}
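To make the deleted plugin's scoring rule concrete: a node earns a score of 1 when it can satisfy at least one of the pod's aggregate CPU or memory limits, and 0 otherwise. A standalone worked example re-implementing just that arithmetic (plain int64 millicores and bytes, no scheduler types):

```go
package main

import "fmt"

// computeScore, copied in spirit from the deleted file: 1 iff both values
// are non-zero and the limit fits within the allocatable amount.
func computeScore(limit, allocatable int64) int64 {
	if limit != 0 && allocatable != 0 && limit <= allocatable {
		return 1
	}
	return 0
}

func main() {
	// Pod limits aggregate as in getResourceLimits: sum the app containers,
	// then take the max against each init container.
	appContainers := []int64{1000, 2000} // millicores
	var podCPULimit int64
	for _, c := range appContainers {
		podCPULimit += c
	}
	if initCPU := int64(2500); initCPU > podCPULimit {
		podCPULimit = initCPU // not taken here: 2500 < 3000
	}

	cpuScore := computeScore(podCPULimit, 4000) // 3000m fits in 4000m -> 1
	memScore := computeScore(0, 8_000_000_000)  // no memory limit set -> 0

	// Either satisfied limit is enough; the resulting score of 1 only
	// breaks ties between nodes that other priorities rank equally.
	score := int64(0)
	if cpuScore == 1 || memScore == 1 {
		score = 1
	}
	fmt.Println("node score:", score) // 1
}
```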
@@ -1,175 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package noderesources
-
-import (
-	"context"
-	"testing"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
-)
-
-func TestResourceLimits(t *testing.T) {
-	noResources := v1.PodSpec{
-		Containers: []v1.Container{},
-	}
-
-	cpuOnly := v1.PodSpec{
-		NodeName: "machine1",
-		Containers: []v1.Container{
-			{
-				Resources: v1.ResourceRequirements{
-					Limits: v1.ResourceList{
-						v1.ResourceCPU:    resource.MustParse("1000m"),
-						v1.ResourceMemory: resource.MustParse("0"),
-					},
-				},
-			},
-			{
-				Resources: v1.ResourceRequirements{
-					Limits: v1.ResourceList{
-						v1.ResourceCPU:    resource.MustParse("2000m"),
-						v1.ResourceMemory: resource.MustParse("0"),
-					},
-				},
-			},
-		},
-	}
-
-	memOnly := v1.PodSpec{
-		NodeName: "machine2",
-		Containers: []v1.Container{
-			{
-				Resources: v1.ResourceRequirements{
-					Limits: v1.ResourceList{
-						v1.ResourceCPU:    resource.MustParse("0"),
-						v1.ResourceMemory: resource.MustParse("2000"),
-					},
-				},
-			},
-			{
-				Resources: v1.ResourceRequirements{
-					Limits: v1.ResourceList{
-						v1.ResourceCPU:    resource.MustParse("0"),
-						v1.ResourceMemory: resource.MustParse("3000"),
-					},
-				},
-			},
-		},
-	}
-
-	cpuAndMemory := v1.PodSpec{
-		NodeName: "machine2",
-		Containers: []v1.Container{
-			{
-				Resources: v1.ResourceRequirements{
-					Limits: v1.ResourceList{
-						v1.ResourceCPU:    resource.MustParse("1000m"),
-						v1.ResourceMemory: resource.MustParse("2000"),
-					},
-				},
-			},
-			{
-				Resources: v1.ResourceRequirements{
-					Limits: v1.ResourceList{
-						v1.ResourceCPU:    resource.MustParse("2000m"),
-						v1.ResourceMemory: resource.MustParse("3000"),
-					},
-				},
-			},
-		},
-	}
-
-	tests := []struct {
-		// input pod
-		pod          *v1.Pod
-		nodes        []*v1.Node
-		expectedList framework.NodeScoreList
-		name         string
-		skipPreScore bool
-	}{
-		{
-			pod:          &v1.Pod{Spec: noResources},
-			nodes:        []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 0), makeNode("machine3", 0, 10000), makeNode("machine4", 0, 0)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}, {Name: "machine3", Score: 0}, {Name: "machine4", Score: 0}},
-			name:         "pod does not specify its resource limits",
-		},
-		{
-			pod:          &v1.Pod{Spec: cpuOnly},
-			nodes:        []*v1.Node{makeNode("machine1", 3000, 10000), makeNode("machine2", 2000, 10000)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 0}},
-			name:         "pod only specifies cpu limits",
-		},
-		{
-			pod:          &v1.Pod{Spec: memOnly},
-			nodes:        []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 1}},
-			name:         "pod only specifies mem limits",
-		},
-		{
-			pod:          &v1.Pod{Spec: cpuAndMemory},
-			nodes:        []*v1.Node{makeNode("machine1", 4000, 4000), makeNode("machine2", 5000, 10000)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 1}, {Name: "machine2", Score: 1}},
-			name:         "pod specifies both cpu and mem limits",
-		},
-		{
-			pod:          &v1.Pod{Spec: cpuAndMemory},
-			nodes:        []*v1.Node{makeNode("machine1", 0, 0)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}},
-			name:         "node does not advertise its allocatables",
-		},
-		{
-			pod:          &v1.Pod{Spec: cpuAndMemory},
-			nodes:        []*v1.Node{makeNode("machine1", 0, 0)},
-			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}},
-			skipPreScore: true,
-			name:         "preScore skipped",
-		},
-	}
-
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			snapshot := cache.NewSnapshot(nil, test.nodes)
-			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
-			p := &ResourceLimits{handle: fh}
-			for i := range test.nodes {
-				state := framework.NewCycleState()
-				if !test.skipPreScore {
-					status := p.PreScore(context.Background(), state, test.pod, test.nodes)
-					if !status.IsSuccess() {
-						t.Errorf("unexpected error: %v", status)
-					}
-				}
-
-				gotScore, err := p.Score(context.Background(), state, test.pod, test.nodes[i].Name)
-				if test.skipPreScore {
-					if err == nil {
-						t.Errorf("expected error")
-					}
-				} else if err != nil {
-					t.Errorf("unexpected error: %v", err)
-				}
-				if test.expectedList[i].Score != gotScore {
-					t.Errorf("gotScore %v, wantScore %v", gotScore, test.expectedList[i].Score)
-				}
-			}
-		})
-	}
-}
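One detail the deleted test pins down is the PreScore-to-Score handshake: PreScore writes the pod's aggregate limits into the cycle state, and Score fails when that entry is missing, which is exactly what the "preScore skipped" case asserted. A minimal stand-in sketch of that contract (this is not the real framework.CycleState, just an illustration):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// cycleState: a concurrency-safe key/value store scoped to one scheduling
// cycle, standing in for the framework's CycleState.
type cycleState struct {
	mu   sync.RWMutex
	data map[string]interface{}
}

func newCycleState() *cycleState {
	return &cycleState{data: map[string]interface{}{}}
}

func (c *cycleState) write(key string, val interface{}) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data[key] = val
}

func (c *cycleState) read(key string) (interface{}, error) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.data[key]
	if !ok {
		return nil, errors.New("not found: " + key)
	}
	return v, nil
}

func main() {
	const key = "PreScoreNodeResourceLimits"
	state := newCycleState()

	// PreScore: compute the pod's aggregate limits once and stash them.
	state.write(key, map[string]int64{"cpu": 3000, "memory": 5000})

	// Score (per node): read the precomputed limits back. Skipping the
	// PreScore write above makes this read fail, as in the deleted test.
	if v, err := state.read(key); err != nil {
		fmt.Println("score error:", err)
	} else {
		fmt.Println("pod limits:", v)
	}
}
```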
@@ -58,7 +58,6 @@ func NewInTreeRegistry() framework.Registry {
 		noderesources.MostAllocatedName:            noderesources.NewMostAllocated,
 		noderesources.LeastAllocatedName:           noderesources.NewLeastAllocated,
 		noderesources.RequestedToCapacityRatioName: noderesources.NewRequestedToCapacityRatio,
-		noderesources.ResourceLimitsName:           noderesources.NewResourceLimits,
 		volumebinding.Name:                         volumebinding.New,
 		volumerestrictions.Name:                    volumerestrictions.New,
 		volumezone.Name:                            volumezone.New,
@@ -13,7 +13,6 @@ go_test(
         "extender_test.go",
         "framework_test.go",
         "main_test.go",
-        "plugins_test.go",
         "predicates_test.go",
         "preemption_test.go",
         "priorities_test.go",
@@ -1,74 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package scheduler
-
-import (
-	"testing"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
-	testutils "k8s.io/kubernetes/test/integration/util"
-)
-
-func TestNodeResourceLimits(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ResourceLimitsPriorityFunction, true)()
-
-	testCtx := initTest(t, "node-resource-limits")
-	defer testutils.CleanupTest(t, testCtx)
-
-	// Add one node
-	expectedNode, err := createNode(testCtx.ClientSet, "test-node1", &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(2000, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(2000, resource.DecimalSI),
-	})
-	if err != nil {
-		t.Fatalf("Cannot create node: %v", err)
-	}
-
-	// Add another node with less resource
-	_, err = createNode(testCtx.ClientSet, "test-node2", &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(1000, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(1000, resource.DecimalSI),
-	})
-	if err != nil {
-		t.Fatalf("Cannot create node: %v", err)
-	}
-
-	podName := "pod-with-resource-limits"
-	pod, err := runPausePod(testCtx.ClientSet, initPausePod(&pausePodConfig{
-		Name:      podName,
-		Namespace: testCtx.NS.Name,
-		Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
-			v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-			v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI)},
-		},
-	}))
-	if err != nil {
-		t.Fatalf("Error running pause pod: %v", err)
-	}
-
-	if pod.Spec.NodeName != expectedNode.Name {
-		t.Errorf("pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, expectedNode.Name)
-	} else {
-		t.Logf("pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
-	}
-}