Merge pull request #106002 from kerthcet/feature/refactor-NodeResourcesFit-plugin
refactor scheduler's node resource plugins
This commit is contained in:
commit ec8e6e8778
@@ -42,10 +42,7 @@ func addKnownTypes(scheme *runtime.Scheme) error {
		&InterPodAffinityArgs{},
		&NodeResourcesFitArgs{},
		&PodTopologySpreadArgs{},
		&RequestedToCapacityRatioArgs{},
		&VolumeBindingArgs{},
		&NodeResourcesLeastAllocatedArgs{},
		&NodeResourcesMostAllocatedArgs{},
		&NodeResourcesBalancedAllocationArgs{},
		&NodeAffinityArgs{},
	)
@@ -113,44 +113,6 @@ type PodTopologySpreadArgs struct {

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// RequestedToCapacityRatioArgs holds arguments used to configure RequestedToCapacityRatio plugin.
type RequestedToCapacityRatioArgs struct {
	metav1.TypeMeta

	// Points defining priority function shape
	Shape []UtilizationShapePoint
	// Resources to be considered when scoring.
	// The default resource set includes "cpu" and "memory" with an equal weight.
	// Weights should be larger than 0.
	Resources []ResourceSpec
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeResourcesLeastAllocatedArgs holds arguments used to configure NodeResourcesLeastAllocated plugin.
type NodeResourcesLeastAllocatedArgs struct {
	metav1.TypeMeta

	// Resources to be considered when scoring.
	// The default resource set includes "cpu" and "memory" with an equal weight.
	// Allowed weights go from 1 to 100.
	Resources []ResourceSpec
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeResourcesMostAllocatedArgs holds arguments used to configure NodeResourcesMostAllocated plugin.
type NodeResourcesMostAllocatedArgs struct {
	metav1.TypeMeta

	// Resources to be considered when scoring.
	// The default resource set includes "cpu" and "memory" with an equal weight.
	// Allowed weights go from 1 to 100.
	Resources []ResourceSpec
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin.
type NodeResourcesBalancedAllocationArgs struct {
	metav1.TypeMeta
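The three Args types deleted above are subsumed by the single ScoringStrategy field of NodeResourcesFitArgs. A minimal sketch of the consolidated shape, inferred from the call sites in the tests later in this diff (field comments are paraphrased, not the authoritative definitions):

	// Sketch: consolidated scoring configuration for the NodeResourcesFit plugin.
	// ScoringStrategyType is a string-typed enum with the values used below:
	// config.LeastAllocated, config.MostAllocated, config.RequestedToCapacityRatio.
	type ScoringStrategy struct {
		// Type selects which scorer NodeResourcesFit uses.
		Type ScoringStrategyType
		// Resources and their weights, shared by all three strategies.
		Resources []ResourceSpec
		// Shape points, consulted only when Type == RequestedToCapacityRatio.
		RequestedToCapacityRatio *RequestedToCapacityRatioParam
	}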
@@ -128,11 +128,7 @@ type removedPlugins struct {
var removedPluginsByVersion = []removedPlugins{
	{
		schemeGroupVersion: v1beta2.SchemeGroupVersion.String(),
		plugins: []string{
			"NodeResourcesLeastAllocated",
			"NodeResourcesMostAllocated",
			"RequestedToCapacityRatio",
		},
		plugins: []string{},
	},
	{
		schemeGroupVersion: v1beta3.SchemeGroupVersion.String(),
@@ -183,10 +179,7 @@ func validatePluginConfig(path *field.Path, apiVersion string, profile *config.K
	"NodeAffinity":                    ValidateNodeAffinityArgs,
	"NodeResourcesBalancedAllocation": ValidateNodeResourcesBalancedAllocationArgs,
	"NodeResourcesFitArgs":            ValidateNodeResourcesFitArgs,
	"NodeResourcesLeastAllocated":     ValidateNodeResourcesLeastAllocatedArgs,
	"NodeResourcesMostAllocated":      ValidateNodeResourcesMostAllocatedArgs,
	"PodTopologySpread":               ValidatePodTopologySpreadArgs,
	"RequestedToCapacityRatio":        ValidateRequestedToCapacityRatioArgs,
	"VolumeBinding":                   ValidateVolumeBindingArgs,
}
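removedPluginsByVersion drives the compatibility error exercised by the tests later in this diff ("was removed in version ..."). A minimal sketch of the lookup it supports, assuming the entries are ordered oldest-first (the real helper in this package may differ in name and detail):

	// isRemovedPlugin reports whether plugin `name` was removed at or before
	// apiVersion, and if so, in which version it was removed.
	func isRemovedPlugin(apiVersion string, name string) (string, bool) {
		for _, rp := range removedPluginsByVersion {
			for _, p := range rp.plugins {
				if p == name {
					return rp.schemeGroupVersion, true
				}
			}
			if rp.schemeGroupVersion == apiVersion {
				break // entries after this one apply only to newer API versions
			}
		}
		return "", false
	}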
@@ -164,14 +164,6 @@ func validateConstraintNotRepeat(path *field.Path, constraints []v1.TopologySpre
	return nil
}

// ValidateRequestedToCapacityRatioArgs validates that RequestedToCapacityRatioArgs are correct.
func ValidateRequestedToCapacityRatioArgs(path *field.Path, args *config.RequestedToCapacityRatioArgs) error {
	var allErrs field.ErrorList
	allErrs = append(allErrs, validateFunctionShape(args.Shape, path.Child("shape"))...)
	allErrs = append(allErrs, validateResourcesNoMax(args.Resources, path.Child("resources"))...)
	return allErrs.ToAggregate()
}

func validateFunctionShape(shape []config.UtilizationShapePoint, path *field.Path) field.ErrorList {
	const (
		minUtilization = 0
@@ -209,28 +201,6 @@ func validateFunctionShape(shape []config.UtilizationShapePoint, path *field.Pat
	return allErrs
}

// The weight of a resource may exceed 100; this applies only to the `RequestedToCapacityRatio`
// plugin, for backwards-compatibility reasons.
func validateResourcesNoMax(resources []config.ResourceSpec, p *field.Path) field.ErrorList {
	var allErrs field.ErrorList
	for i, r := range resources {
		if r.Weight < 1 {
			allErrs = append(allErrs, field.Invalid(p.Index(i).Child("weight"), r.Weight,
				fmt.Sprintf("resource weight of %s not in valid range [1, inf)", r.Name)))
		}
	}
	return allErrs
}

// ValidateNodeResourcesLeastAllocatedArgs validates that NodeResourcesLeastAllocatedArgs are correct.
func ValidateNodeResourcesLeastAllocatedArgs(path *field.Path, args *config.NodeResourcesLeastAllocatedArgs) error {
	return validateResources(args.Resources, path.Child("resources")).ToAggregate()
}

// ValidateNodeResourcesMostAllocatedArgs validates that NodeResourcesMostAllocatedArgs are correct.
func ValidateNodeResourcesMostAllocatedArgs(path *field.Path, args *config.NodeResourcesMostAllocatedArgs) error {
	return validateResources(args.Resources, path.Child("resources")).ToAggregate()
}

func validateResources(resources []config.ResourceSpec, p *field.Path) field.ErrorList {
	var allErrs field.ErrorList
	for i, resource := range resources {
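The two resource validators differ only in the upper bound: validateResources enforces a weight in [1, 100], while validateResourcesNoMax checks only the lower bound, so RequestedToCapacityRatio keeps accepting historical configurations with weights above 100. A short illustration using the functions above (error text approximate):

	weights := []config.ResourceSpec{{Name: "memory", Weight: 150}}
	// Accepted: no upper bound for the RequestedToCapacityRatio plugin.
	_ = validateResourcesNoMax(weights, field.NewPath("resources")).ToAggregate() // nil
	// Rejected: the generic validator caps weights at 100.
	_ = validateResources(weights, field.NewPath("resources")).ToAggregate() // resources[0].weight: Invalid value: 150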
@@ -358,422 +358,6 @@ func TestValidatePodTopologySpreadArgs(t *testing.T) {
	}
}

func TestValidateRequestedToCapacityRatioArgs(t *testing.T) {
	cases := map[string]struct {
		args     config.RequestedToCapacityRatioArgs
		wantErrs field.ErrorList
	}{
		"valid config": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 20, Score: 5},
					{Utilization: 30, Score: 3},
					{Utilization: 50, Score: 2},
				},
				Resources: []config.ResourceSpec{
					{Name: "custom-resource", Weight: 5},
				},
			},
		},
		"no shape points": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{},
				Resources: []config.ResourceSpec{
					{Name: "custom", Weight: 5},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeRequired, Field: "shape"},
			},
		},
		"utilization less than min": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: -10, Score: 3},
					{Utilization: 10, Score: 2},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[0].utilization"},
			},
		},
		"utilization greater than max": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 10, Score: 3},
					{Utilization: 110, Score: 2},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[1].utilization"},
			},
		},
		"utilization values in non-increasing order": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 30, Score: 3},
					{Utilization: 20, Score: 2},
					{Utilization: 10, Score: 1},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[1].utilization"},
			},
		},
		"duplicated utilization values": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 10, Score: 3},
					{Utilization: 20, Score: 2},
					{Utilization: 20, Score: 1},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[2].utilization"},
			},
		},
		"score less than min": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 10, Score: -1},
					{Utilization: 20, Score: 2},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[0].score"},
			},
		},
		"score greater than max": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 10, Score: 3},
					{Utilization: 20, Score: 11},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[1].score"},
			},
		},
		"resources weight less than 1": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 10, Score: 1},
				},
				Resources: []config.ResourceSpec{
					{Name: "custom", Weight: 0},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		"multiple errors": {
			args: config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 20, Score: -1},
					{Utilization: 10, Score: 2},
				},
				Resources: []config.ResourceSpec{
					{Name: "custom", Weight: 0},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[1].utilization"},
				&field.Error{Type: field.ErrorTypeInvalid, Field: "shape[0].score"},
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := ValidateRequestedToCapacityRatioArgs(nil, &tc.args)
			if diff := cmp.Diff(tc.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Errorf("ValidateRequestedToCapacityRatioArgs returned err (-want,+got):\n%s", diff)
			}
		})
	}
}

func TestValidateNodeResourcesLeastAllocatedArgs(t *testing.T) {
	cases := map[string]struct {
		args     *config.NodeResourcesLeastAllocatedArgs
		wantErrs field.ErrorList
	}{
		"valid config": {
			args: &config.NodeResourcesLeastAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "cpu", Weight: 50},
					{Name: "memory", Weight: 30},
				},
			},
		},
		"weight less than min": {
			args: &config.NodeResourcesLeastAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "cpu", Weight: 0},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		"weight more than max": {
			args: &config.NodeResourcesLeastAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "memory", Weight: 101},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		"multiple error": {
			args: &config.NodeResourcesLeastAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "memory", Weight: 0},
					{Name: "cpu", Weight: 101},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[1].weight"},
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := ValidateNodeResourcesLeastAllocatedArgs(nil, tc.args)
			if diff := cmp.Diff(tc.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Errorf("ValidateNodeResourcesLeastAllocatedArgs returned err (-want,+got):\n%s", diff)
			}
		})
	}
}

func TestValidateNodeResourcesMostAllocatedArgs(t *testing.T) {
	cases := map[string]struct {
		args     *config.NodeResourcesMostAllocatedArgs
		wantErrs field.ErrorList
	}{
		"valid config": {
			args: &config.NodeResourcesMostAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "cpu", Weight: 70},
					{Name: "memory", Weight: 40},
				},
			},
		},
		"weight less than min": {
			args: &config.NodeResourcesMostAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "cpu", Weight: -1},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		"weight more than max": {
			args: &config.NodeResourcesMostAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "memory", Weight: 110},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		"multiple error": {
			args: &config.NodeResourcesMostAllocatedArgs{
				Resources: []config.ResourceSpec{
					{Name: "memory", Weight: -1},
					{Name: "cpu", Weight: 110},
				},
			},
			wantErrs: field.ErrorList{
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
				&field.Error{Type: field.ErrorTypeInvalid, Field: "resources[1].weight"},
			},
		},
	}

	for name, tc := range cases {
		t.Run(name, func(t *testing.T) {
			err := ValidateNodeResourcesMostAllocatedArgs(nil, tc.args)
			if diff := cmp.Diff(tc.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Errorf("ValidateNodeResourcesMostAllocatedArgs returned err (-want,+got):\n%s", diff)
			}
		})
	}
}

func TestValidateNodeResourcesBalancedAllocationArgs(t *testing.T) {
	cases := map[string]struct {
		args *config.NodeResourcesBalancedAllocationArgs
@@ -1171,3 +755,381 @@ func TestValidateFitArgs(t *testing.T) {
		})
	}
}

func TestValidateLeastAllocatedScoringStrategy(t *testing.T) {
	tests := []struct {
		name      string
		resources []config.ResourceSpec
		wantErrs  field.ErrorList
	}{
		{
			name:     "default config",
			wantErrs: nil,
		},
		{
			name: "multi valid resources",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 1},
				{Name: "memory", Weight: 10},
			},
			wantErrs: nil,
		},
		{
			name: "weight less than min",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 0},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		{
			name: "weight greater than max",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 101},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		{
			name: "multi invalid resources",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 0},
				{Name: "memory", Weight: 101},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
				{Type: field.ErrorTypeInvalid, Field: "resources[1].weight"},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			args := config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type:      config.LeastAllocated,
					Resources: test.resources,
				},
			}
			err := ValidateNodeResourcesFitArgs(nil, &args)
			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Errorf("ValidateNodeResourcesFitArgs returned err (-want,+got):\n%s", diff)
			}
		})
	}
}

func TestValidateMostAllocatedScoringStrategy(t *testing.T) {
	tests := []struct {
		name      string
		resources []config.ResourceSpec
		wantErrs  field.ErrorList
	}{
		{
			name:     "default config",
			wantErrs: nil,
		},
		{
			name: "multi valid resources",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 1},
				{Name: "memory", Weight: 10},
			},
			wantErrs: nil,
		},
		{
			name: "weight less than min",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 0},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		{
			name: "weight greater than max",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 101},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		{
			name: "multi invalid resources",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 0},
				{Name: "memory", Weight: 101},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
				{Type: field.ErrorTypeInvalid, Field: "resources[1].weight"},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			args := config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type:      config.MostAllocated,
					Resources: test.resources,
				},
			}
			err := ValidateNodeResourcesFitArgs(nil, &args)
			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Errorf("ValidateNodeResourcesFitArgs returned err (-want,+got):\n%s", diff)
			}
		})
	}
}

func TestValidateRequestedToCapacityRatioScoringStrategy(t *testing.T) {
	tests := []struct {
		name      string
		resources []config.ResourceSpec
		shapes    []config.UtilizationShapePoint
		wantErrs  field.ErrorList
	}{
		{
			name: "weight greater than max",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 101},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		{
			name: "weight less than min",
			resources: []config.ResourceSpec{
				{Name: "cpu", Weight: 0},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "resources[0].weight"},
			},
		},
		{
			name: "valid shapes",
			shapes: []config.UtilizationShapePoint{
				{Utilization: 30, Score: 3},
			},
			wantErrs: nil,
		},
		{
			name: "utilization less than min",
			shapes: []config.UtilizationShapePoint{
				{Utilization: -1, Score: 3},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "shape[0].utilization"},
			},
		},
		{
			name: "utilization greater than max",
			shapes: []config.UtilizationShapePoint{
				{Utilization: 101, Score: 3},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "shape[0].utilization"},
			},
		},
		{
			name: "duplicated utilization values",
			shapes: []config.UtilizationShapePoint{
				{Utilization: 10, Score: 3},
				{Utilization: 10, Score: 3},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "shape[1].utilization"},
			},
		},
		{
			name: "increasing utilization values",
			shapes: []config.UtilizationShapePoint{
				{Utilization: 10, Score: 3},
				{Utilization: 20, Score: 3},
				{Utilization: 30, Score: 3},
			},
			wantErrs: nil,
		},
		{
			name: "non-increasing utilization values",
			shapes: []config.UtilizationShapePoint{
				{Utilization: 10, Score: 3},
				{Utilization: 20, Score: 3},
				{Utilization: 15, Score: 3},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "shape[2].utilization"},
			},
		},
		{
			name: "score less than min",
			shapes: []config.UtilizationShapePoint{
				{Utilization: 10, Score: -1},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "shape[0].score"},
			},
		},
		{
			name: "score greater than max",
			shapes: []config.UtilizationShapePoint{
				{Utilization: 10, Score: 11},
			},
			wantErrs: field.ErrorList{
				{Type: field.ErrorTypeInvalid, Field: "shape[0].score"},
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			args := config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type:      config.RequestedToCapacityRatio,
					Resources: test.resources,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: test.shapes,
					},
				},
			}
			err := ValidateNodeResourcesFitArgs(nil, &args)
			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Errorf("ValidateNodeResourcesFitArgs returned err (-want,+got):\n%s", diff)
			}
		})
	}
}
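Taken together, the three new tests define the migration path for existing configurations: each deleted per-plugin args type maps one-to-one onto a NodeResourcesFit scoring strategy. A hedged before/after sketch using the types exercised above:

	// Before: standalone plugin with its own args type (deleted in this commit).
	_ = config.RequestedToCapacityRatioArgs{
		Shape:     []config.UtilizationShapePoint{{Utilization: 0, Score: 10}},
		Resources: []config.ResourceSpec{{Name: "cpu", Weight: 1}},
	}

	// After: the same shape and resources expressed as a NodeResourcesFit strategy.
	_ = config.NodeResourcesFitArgs{
		ScoringStrategy: &config.ScoringStrategy{
			Type:      config.RequestedToCapacityRatio,
			Resources: []config.ResourceSpec{{Name: "cpu", Weight: 1}},
			RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
				Shape: []config.UtilizationShapePoint{{Utilization: 0, Score: 10}},
			},
		},
	}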
@@ -198,21 +198,9 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) {
		BindVerb: "bar",
	})

	badRemovedPlugins1 := validConfig.DeepCopy()
	badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesLeastAllocated", Weight: 2})

	badRemovedPlugins3 := validConfig.DeepCopy()
	badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesMostAllocated", Weight: 2})

	goodRemovedPlugins2 := validConfig.DeepCopy()
	goodRemovedPlugins2.Profiles[0].Plugins.Score.Enabled = append(goodRemovedPlugins2.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "PodTopologySpread", Weight: 2})

	deprecatedPluginsConfig := validConfig.DeepCopy()
	deprecatedPluginsConfig.Profiles[0].PluginConfig = append(deprecatedPluginsConfig.Profiles[0].PluginConfig, config.PluginConfig{
		Name: "NodeResourcesLeastAllocated",
		Args: &config.NodeResourcesLeastAllocatedArgs{},
	})

	scenarios := map[string]struct {
		expectedToFail bool
		config         *config.KubeSchedulerConfiguration
@@ -290,23 +278,10 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) {
			expectedToFail: true,
			config:         mismatchQueueSort,
		},
		"bad-removed-plugins-1": {
			expectedToFail: true,
			config:         badRemovedPlugins1,
		},
		"bad-removed-plugins-3": {
			expectedToFail: true,
			config:         badRemovedPlugins3,
		},
		"good-removed-plugins-2": {
			expectedToFail: false,
			config:         goodRemovedPlugins2,
		},
		"bad-plugins-config": {
			expectedToFail: true,
			config:         deprecatedPluginsConfig,
			errorString:    "profiles[0].pluginConfig[1]: Invalid value: \"NodeResourcesLeastAllocated\": was removed in version \"kubescheduler.config.k8s.io/v1beta2\" (KubeSchedulerConfiguration is version \"kubescheduler.config.k8s.io/v1beta2\")",
		},
	}

	for name, scenario := range scenarios {
@@ -497,24 +472,9 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) {
		BindVerb: "bar",
	})

	badRemovedPlugins1 := validConfig.DeepCopy()
	badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesLeastAllocated", Weight: 2})

	badRemovedPlugins2 := validConfig.DeepCopy()
	badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "RequestedToCapacityRatio", Weight: 2})

	badRemovedPlugins3 := validConfig.DeepCopy()
	badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesMostAllocated", Weight: 2})

	goodRemovedPlugins2 := validConfig.DeepCopy()
	goodRemovedPlugins2.Profiles[0].Plugins.Score.Enabled = append(goodRemovedPlugins2.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "PodTopologySpread", Weight: 2})

	deprecatedPluginsConfig := validConfig.DeepCopy()
	deprecatedPluginsConfig.Profiles[0].PluginConfig = append(deprecatedPluginsConfig.Profiles[0].PluginConfig, config.PluginConfig{
		Name: "NodeResourcesLeastAllocated",
		Args: &config.NodeResourcesLeastAllocatedArgs{},
	})

	scenarios := map[string]struct {
		expectedToFail bool
		config         *config.KubeSchedulerConfiguration
@@ -592,27 +552,10 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) {
			expectedToFail: true,
			config:         mismatchQueueSort,
		},
		"bad-removed-plugins-1": {
			expectedToFail: true,
			config:         badRemovedPlugins1,
		},
		"bad-removed-plugins-2": {
			expectedToFail: true,
			config:         badRemovedPlugins2,
		},
		"bad-removed-plugins-3": {
			expectedToFail: true,
			config:         badRemovedPlugins3,
		},
		"good-removed-plugins-2": {
			expectedToFail: false,
			config:         goodRemovedPlugins2,
		},
		"bad-plugins-config": {
			expectedToFail: true,
			config:         deprecatedPluginsConfig,
			errorString:    "profiles[0].pluginConfig[1]: Invalid value: \"NodeResourcesLeastAllocated\": was removed in version \"kubescheduler.config.k8s.io/v1beta2\" (KubeSchedulerConfiguration is version \"kubescheduler.config.k8s.io/v1beta3\")",
		},
	}

	for name, scenario := range scenarios {
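Both scenario tables share one harness: each named config is validated and the outcome is compared against expectedToFail. A sketch of the loop body these tests share (the validation entry point and its return shape are assumptions, not quoted from the diff):

	for name, scenario := range scenarios {
		t.Run(name, func(t *testing.T) {
			errs := ValidateKubeSchedulerConfiguration(scenario.config) // assumed entry point
			if errs == nil && scenario.expectedToFail {
				t.Error("expected validation to fail")
			}
			if errs != nil && !scenario.expectedToFail {
				t.Errorf("unexpected validation failure: %v", errs)
			}
		})
	}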
95	pkg/scheduler/apis/config/zz_generated.deepcopy.go (generated)
@@ -320,66 +320,6 @@ func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object {
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourcesLeastAllocatedArgs) DeepCopyInto(out *NodeResourcesLeastAllocatedArgs) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]ResourceSpec, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesLeastAllocatedArgs.
func (in *NodeResourcesLeastAllocatedArgs) DeepCopy() *NodeResourcesLeastAllocatedArgs {
	if in == nil {
		return nil
	}
	out := new(NodeResourcesLeastAllocatedArgs)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourcesLeastAllocatedArgs) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourcesMostAllocatedArgs) DeepCopyInto(out *NodeResourcesMostAllocatedArgs) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]ResourceSpec, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesMostAllocatedArgs.
func (in *NodeResourcesMostAllocatedArgs) DeepCopy() *NodeResourcesMostAllocatedArgs {
	if in == nil {
		return nil
	}
	out := new(NodeResourcesMostAllocatedArgs)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeResourcesMostAllocatedArgs) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Plugin) DeepCopyInto(out *Plugin) {
	*out = *in
@@ -500,41 +440,6 @@ func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object {
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestedToCapacityRatioArgs) DeepCopyInto(out *RequestedToCapacityRatioArgs) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Shape != nil {
		in, out := &in.Shape, &out.Shape
		*out = make([]UtilizationShapePoint, len(*in))
		copy(*out, *in)
	}
	if in.Resources != nil {
		in, out := &in.Resources, &out.Resources
		*out = make([]ResourceSpec, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioArgs.
func (in *RequestedToCapacityRatioArgs) DeepCopy() *RequestedToCapacityRatioArgs {
	if in == nil {
		return nil
	}
	out := new(RequestedToCapacityRatioArgs)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *RequestedToCapacityRatioArgs) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) {
	*out = *in
@@ -27,9 +27,6 @@ const (
	NodePorts                       = "NodePorts"
	NodeResourcesBalancedAllocation = "NodeResourcesBalancedAllocation"
	NodeResourcesFit                = "NodeResourcesFit"
	NodeResourcesLeastAllocated     = "NodeResourcesLeastAllocated"
	NodeResourcesMostAllocated      = "NodeResourcesMostAllocated"
	RequestedToCapacityRatio        = "RequestedToCapacityRatio"
	NodeUnschedulable               = "NodeUnschedulable"
	NodeVolumeLimits                = "NodeVolumeLimits"
	AzureDiskLimits                 = "AzureDiskLimits"
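With these registry constants gone, per-strategy scorer names are derived directly from the ScoringStrategyType values, as the fit.go hunks below show:

	// e.g. string(config.LeastAllocated) yields "LeastAllocated"
	name := string(config.LeastAllocated)
	_ = name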
@@ -51,7 +51,7 @@ var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{
	config.LeastAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer {
		resToWeightMap := resourcesToWeightMap(args.ScoringStrategy.Resources)
		return &resourceAllocationScorer{
			Name:                LeastAllocatedName,
			Name:                string(config.LeastAllocated),
			scorer:              leastResourceScorer(resToWeightMap),
			resourceToWeightMap: resToWeightMap,
		}
@@ -59,7 +59,7 @@ var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{
	config.MostAllocated: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer {
		resToWeightMap := resourcesToWeightMap(args.ScoringStrategy.Resources)
		return &resourceAllocationScorer{
			Name:                MostAllocatedName,
			Name:                string(config.MostAllocated),
			scorer:              mostResourceScorer(resToWeightMap),
			resourceToWeightMap: resToWeightMap,
		}
@@ -67,7 +67,7 @@ var nodeResourceStrategyTypeMap = map[config.ScoringStrategyType]scorer{
	config.RequestedToCapacityRatio: func(args *config.NodeResourcesFitArgs) *resourceAllocationScorer {
		resToWeightMap := resourcesToWeightMap(args.ScoringStrategy.Resources)
		return &resourceAllocationScorer{
			Name:                RequestedToCapacityRatioName,
			Name:                string(config.RequestedToCapacityRatio),
			scorer:              requestedToCapacityRatioScorer(resToWeightMap, args.ScoringStrategy.RequestedToCapacityRatio.Shape),
			resourceToWeightMap: resToWeightMap,
		}
@@ -118,8 +118,8 @@ func NewFit(plArgs runtime.Object, h framework.Handle, fts feature.Features) (fr
	}

	strategy := args.ScoringStrategy.Type
	scorePlugin, ok := nodeResourceStrategyTypeMap[strategy]
	if !ok {
	scorePlugin, exists := nodeResourceStrategyTypeMap[strategy]
	if !exists {
		return nil, fmt.Errorf("scoring strategy %s is not supported", strategy)
	}

@@ -229,8 +229,8 @@ func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod
	if len(insufficientResources) != 0 {
		// We will keep all failure reasons.
		failureReasons := make([]string, 0, len(insufficientResources))
		for _, r := range insufficientResources {
			failureReasons = append(failureReasons, r.Reason)
		for i := range insufficientResources {
			failureReasons = append(failureReasons, insufficientResources[i].Reason)
		}
		return framework.NewStatus(framework.Unschedulable, failureReasons...)
	}
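Two small behavior notes fall out of these hunks: NewFit now resolves the scorer once at construction and fails fast on an unknown strategy, and Filter's loop indexes the slice instead of copying each InsufficientResource value. A condensed sketch of the construction path, using the map shown above (the wrapper function is hypothetical):

	// newFitScorer sketches how a profile's ScoringStrategy picks its scorer
	// when the plugin is built, not later during scoring.
	func newFitScorer(args *config.NodeResourcesFitArgs) (*resourceAllocationScorer, error) {
		build, exists := nodeResourceStrategyTypeMap[args.ScoringStrategy.Type]
		if !exists {
			return nil, fmt.Errorf("scoring strategy %s is not supported", args.ScoringStrategy.Type)
		}
		return build(args), nil // carries Name, scorer func, and the weight map
	}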
@@ -32,6 +32,7 @@ import (
	plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

var (
@@ -598,29 +599,37 @@ func TestStorageRequests(t *testing.T) {
}

func TestFitScore(t *testing.T) {
	type test struct {
	defaultResources := []config.ResourceSpec{
		{Name: string(v1.ResourceCPU), Weight: 1},
		{Name: string(v1.ResourceMemory), Weight: 1},
	}

	tests := []struct {
		name                 string
		requestedPod         *v1.Pod
		nodes                []*v1.Node
		scheduledPods        []*v1.Pod
		existingPods         []*v1.Pod
		expectedPriorities   framework.NodeScoreList
		nodeResourcesFitArgs config.NodeResourcesFitArgs
	}

	tests := []test{
	}{
		{
			name:          "test case for ScoringStrategy RequestedToCapacityRatio case1",
			requestedPod:  makePod("", 3000, 5000),
			nodes:         []*v1.Node{makeNode("node1", 4000, 10000), makeNode("node2", 6000, 10000)},
			scheduledPods: []*v1.Pod{makePod("node1", 2000, 4000), makePod("node2", 1000, 2000)},
			name:          "test case for ScoringStrategy RequestedToCapacityRatio case1",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: []config.ResourceSpec{
						{Name: "memory", Weight: 1},
						{Name: "cpu", Weight: 1},
					},
					Type:      config.RequestedToCapacityRatio,
					Resources: defaultResources,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 10},
@@ -631,18 +640,23 @@ func TestFitScore(t *testing.T) {
			},
		},
		{
			name:          "test case for ScoringStrategy RequestedToCapacityRatio case2",
			requestedPod:  makePod("", 3000, 5000),
			nodes:         []*v1.Node{makeNode("node1", 4000, 10000), makeNode("node2", 6000, 10000)},
			scheduledPods: []*v1.Pod{makePod("node1", 2000, 4000), makePod("node2", 1000, 2000)},
			name:          "test case for ScoringStrategy RequestedToCapacityRatio case2",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 95}, {Name: "node2", Score: 68}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: []config.ResourceSpec{
						{Name: "memory", Weight: 1},
						{Name: "cpu", Weight: 1},
					},
					Type:      config.RequestedToCapacityRatio,
					Resources: defaultResources,
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 0},
@@ -653,34 +667,44 @@ func TestFitScore(t *testing.T) {
			},
		},
		{
			name:          "test case for ScoringStrategy MostAllocated",
			requestedPod:  makePod("", 1000, 2000),
			nodes:         []*v1.Node{makeNode("node1", 4000, 10000), makeNode("node2", 6000, 10000)},
			scheduledPods: []*v1.Pod{makePod("node1", 2000, 4000), makePod("node2", 1000, 2000)},
			name:          "test case for ScoringStrategy MostAllocated",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.MostAllocated,
					Resources: []config.ResourceSpec{
						{Name: "memory", Weight: 1},
						{Name: "cpu", Weight: 1},
					},
					Type:      config.MostAllocated,
					Resources: defaultResources,
				},
			},
		},
		{
			name:          "test case for ScoringStrategy LeastAllocated",
			requestedPod:  makePod("", 1000, 2000),
			nodes:         []*v1.Node{makeNode("node1", 4000, 10000), makeNode("node2", 6000, 10000)},
			scheduledPods: []*v1.Pod{makePod("node1", 2000, 4000), makePod("node2", 1000, 2000)},
			name:          "test case for ScoringStrategy LeastAllocated",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
			},
			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}},
			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.LeastAllocated,
					Resources: []config.ResourceSpec{
						{Name: "memory", Weight: 1},
						{Name: "cpu", Weight: 1},
					},
					Type:      config.LeastAllocated,
					Resources: defaultResources,
				},
			},
		},
@@ -689,7 +713,7 @@ func TestFitScore(t *testing.T) {
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			state := framework.NewCycleState()
			snapshot := cache.NewSnapshot(test.scheduledPods, test.nodes)
			snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
			args := test.nodeResourcesFitArgs
			p, err := NewFit(&args, fh, plfeature.Features{EnablePodOverhead: true})
@@ -17,81 +17,15 @@ limitations under the License.
package noderesources

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)

// LeastAllocated is a score plugin that favors nodes with fewer requested resources.
type LeastAllocated struct {
	handle framework.Handle
	resourceAllocationScorer
}

var _ = framework.ScorePlugin(&LeastAllocated{})

// LeastAllocatedName is the name of the plugin used in the plugin registry and configurations.
const LeastAllocatedName = names.NodeResourcesLeastAllocated

// Name returns name of the plugin. It is used in logs, etc.
func (la *LeastAllocated) Name() string {
	return LeastAllocatedName
}

// Score invoked at the score extension point.
func (la *LeastAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
	nodeInfo, err := la.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
	if err != nil {
		return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
	}

	// la.score favors nodes with fewer requested resources.
	// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
	// prioritizes based on the minimum of the average of the fraction of requested to capacity.
	//
	// Details:
	// (cpu((capacity-sum(requested))*MaxNodeScore/capacity) + memory((capacity-sum(requested))*MaxNodeScore/capacity))/weightSum
	return la.score(pod, nodeInfo)
}

// ScoreExtensions of the Score plugin.
func (la *LeastAllocated) ScoreExtensions() framework.ScoreExtensions {
	return nil
}

// NewLeastAllocated initializes a new plugin and returns it.
func NewLeastAllocated(laArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
	args, ok := laArgs.(*config.NodeResourcesLeastAllocatedArgs)
	if !ok {
		return nil, fmt.Errorf("want args to be of type NodeResourcesLeastAllocatedArgs, got %T", laArgs)
	}
	if err := validation.ValidateNodeResourcesLeastAllocatedArgs(nil, args); err != nil {
		return nil, err
	}

	resToWeightMap := make(resourceToWeightMap)
	for _, resource := range (*args).Resources {
		resToWeightMap[v1.ResourceName(resource.Name)] = resource.Weight
	}

	return &LeastAllocated{
		handle: h,
		resourceAllocationScorer: resourceAllocationScorer{
			Name:                LeastAllocatedName,
			scorer:              leastResourceScorer(resToWeightMap),
			resourceToWeightMap: resToWeightMap,
			enablePodOverhead:   fts.EnablePodOverhead,
		},
	}, nil
}

// leastResourceScorer favors nodes with fewer requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and
// prioritizes based on the minimum of the average of the fraction of requested to capacity.
//
// Details:
// (cpu((capacity-sum(requested))*MaxNodeScore/capacity) + memory((capacity-sum(requested))*MaxNodeScore/capacity))/weightSum
func leastResourceScorer(resToWeightMap resourceToWeightMap) func(resourceToValueMap, resourceToValueMap) int64 {
	return func(requested, allocable resourceToValueMap) int64 {
		var nodeScore, weightSum int64
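A worked instance of that formula, using the numbers from the "differently sized machines" case in the test file below (node1: 4000 CPU / 10000 memory capacity; the pod requests 3000 CPU / 5000 memory in total; both weights are 1; MaxNodeScore is 100):

	// Hypothetical walk-through of leastResourceScorer's arithmetic.
	func exampleLeastAllocatedScore() int64 {
		cpuScore := int64((4000 - 3000) * 100 / 4000)   // 25
		memScore := int64((10000 - 5000) * 100 / 10000) // 50
		// Weighted average with integer division: (25 + 50) / 2 = 37,
		// matching the expected node1 score in the test below.
		return (cpuScore*1 + memScore*1) / (1 + 1)
	}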
@ -18,104 +18,40 @@ package noderesources
|
||||
|
||||
import (
|
||||
"context"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
|
||||
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
|
||||
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
|
||||
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
|
||||
st "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
)
|
||||
|
||||
func TestNodeResourcesLeastAllocated(t *testing.T) {
|
||||
labels1 := map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "blah",
|
||||
}
|
||||
labels2 := map[string]string{
|
||||
"bar": "foo",
|
||||
"baz": "blah",
|
||||
}
|
||||
machine1Spec := v1.PodSpec{
|
||||
NodeName: "machine1",
|
||||
}
|
||||
machine2Spec := v1.PodSpec{
|
||||
NodeName: "machine2",
|
||||
}
|
||||
noResources := v1.PodSpec{
|
||||
Containers: []v1.Container{},
|
||||
}
|
||||
cpuOnly := v1.PodSpec{
|
||||
NodeName: "machine1",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("1000m"),
|
||||
v1.ResourceMemory: resource.MustParse("0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("2000m"),
|
||||
v1.ResourceMemory: resource.MustParse("0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
cpuOnly2 := cpuOnly
|
||||
cpuOnly2.NodeName = "machine2"
|
||||
cpuAndMemory := v1.PodSpec{
|
||||
NodeName: "machine2",
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("1000m"),
|
||||
v1.ResourceMemory: resource.MustParse("2000"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("2000m"),
|
||||
v1.ResourceMemory: resource.MustParse("3000"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
defaultResourceLeastAllocatedSet := []config.ResourceSpec{
|
||||
func TestLeastAllocatedScoringStrategy(t *testing.T) {
|
||||
defaultResources := []config.ResourceSpec{
|
||||
{Name: string(v1.ResourceCPU), Weight: 1},
|
||||
{Name: string(v1.ResourceMemory), Weight: 1},
|
||||
}
|
||||
|
||||
extendedRes := "abc.com/xyz"
|
||||
extendedResourceLeastAllocatedSet := []config.ResourceSpec{
|
||||
{Name: string(v1.ResourceCPU), Weight: 1},
|
||||
{Name: string(v1.ResourceMemory), Weight: 1},
|
||||
{Name: extendedRes, Weight: 1},
|
||||
}
|
||||
cpuMemoryAndExtendedRes := *cpuAndMemory.DeepCopy()
|
||||
cpuMemoryAndExtendedRes.Containers[0].Resources.Requests[v1.ResourceName(extendedRes)] = resource.MustParse("2")
|
||||
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
args config.NodeResourcesLeastAllocatedArgs
|
||||
wantErr error
|
||||
expectedList framework.NodeScoreList
|
||||
name string
|
||||
name string
|
||||
requestedPod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
existingPods []*v1.Pod
|
||||
expectedScores framework.NodeScoreList
|
||||
resources []config.ResourceSpec
|
||||
wantErrs field.ErrorList
|
||||
}{
|
||||
{
|
||||
// Node1 scores (remaining resources) on 0-MaxNodeScore scale
|
||||
@ -126,11 +62,15 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
|
||||
// CPU Score: ((4000 - 0) * MaxNodeScore) / 4000 = MaxNodeScore
|
||||
// Memory Score: ((10000 - 0) * MaxNodeScore) / 10000 = MaxNodeScore
|
||||
// Node2 Score: (MaxNodeScore + MaxNodeScore) / 2 = MaxNodeScore
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
|
||||
name: "nothing scheduled, nothing requested",
|
||||
requestedPod: st.MakePod().Obj(),
|
||||
nodes: []*v1.Node{
|
||||
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
|
||||
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
|
||||
},
|
||||
existingPods: nil,
|
||||
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
|
||||
resources: defaultResources,
|
||||
},
|
||||
{
|
||||
// Node1 scores on 0-MaxNodeScore scale
|
||||
@ -141,18 +81,32 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
|
||||
// CPU Score: ((6000 - 3000) * MaxNodeScore) / 6000 = 50
|
||||
// Memory Score: ((10000 - 5000) * MaxNodeScore) / 10000 = 50
|
||||
// Node2 Score: (50 + 50) / 2 = 50
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
||||
args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
|
||||
expectedList: []framework.NodeScore{{Name: "machine1", Score: 37}, {Name: "machine2", Score: 50}},
|
||||
name: "nothing scheduled, resources requested, differently sized machines",
|
||||
name: "nothing scheduled, resources requested, differently sized machines",
|
||||
requestedPod: st.MakePod().
|
||||
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
|
||||
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
|
||||
Obj(),
|
||||
nodes: []*v1.Node{
|
||||
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
|
||||
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
|
||||
},
|
||||
existingPods: nil,
|
||||
expectedScores: []framework.NodeScore{{Name: "node1", Score: 37}, {Name: "node2", Score: 50}},
|
||||
resources: defaultResources,
|
||||
},
		{
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: []config.ResourceSpec{}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name: "Resources not set, nothing scheduled, resources requested, differently sized machines",
			name: "Resources not set, nothing scheduled, resources requested, differently sized machines",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
			resources: nil,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -163,17 +117,20 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// CPU Score: ((4000 - 0) * MaxNodeScore) / 4000 = MaxNodeScore
			// Memory Score: ((10000 - 0) * MaxNodeScore) / 10000 = MaxNodeScore
			// Node2 Score: (MaxNodeScore + MaxNodeScore) / 2 = MaxNodeScore
			pod: &v1.Pod{Spec: noResources},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name: "no resources requested, pods scheduled",
			pods: []*v1.Pod{
				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: machine1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: machine2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			requestedPod: st.MakePod().Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Obj(),
				st.MakePod().Node("node1").Obj(),
				st.MakePod().Node("node2").Obj(),
				st.MakePod().Node("node2").Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
			resources: defaultResources,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -184,17 +141,20 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// CPU Score: ((10000 - 6000) * MaxNodeScore) / 10000 = 40
			// Memory Score: ((20000 - 5000) * MaxNodeScore) / 20000 = 75
			// Node2 Score: (40 + 75) / 2 = 57
			pod: &v1.Pod{Spec: noResources},
			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 70}, {Name: "machine2", Score: 57}},
			name: "no resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			requestedPod: st.MakePod().Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 70}, {Name: "node2", Score: 57}},
			resources: defaultResources,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -205,15 +165,21 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// CPU Score: ((10000 - 6000) * MaxNodeScore) / 10000 = 40
			// Memory Score: ((20000 - 10000) * MaxNodeScore) / 20000 = 50
			// Node2 Score: (40 + 50) / 2 = 45
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 45}},
			name: "resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			name: "resources requested, pods scheduled with resources",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 57}, {Name: "node2", Score: 45}},
			resources: defaultResources,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -224,15 +190,21 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// CPU Score: ((10000 - 6000) * MaxNodeScore) / 10000 = 40
			// Memory Score: ((50000 - 10000) * MaxNodeScore) / 50000 = 80
			// Node2 Score: (40 + 80) / 2 = 60
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 57}, {Name: "machine2", Score: 60}},
			name: "resources requested, pods scheduled with resources, differently sized machines",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			name: "resources requested, pods scheduled with resources, differently sized machines",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "50000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 57}, {Name: "node2", Score: 60}},
			resources: defaultResources,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -243,26 +215,32 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// CPU Score: ((4000 - 6000) * MaxNodeScore) / 4000 = 0
			// Memory Score: ((10000 - 5000) * MaxNodeScore) / 10000 = 50
			// Node2 Score: (0 + 50) / 2 = 25
			pod: &v1.Pod{Spec: cpuOnly},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 25}},
			name: "requested resources exceed node capacity",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			requestedPod: st.MakePod().Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 25}},
			resources: defaultResources,
		},
		{
			pod: &v1.Pod{Spec: noResources},
			nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: defaultResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name: "zero node resources, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			requestedPod: st.MakePod().Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Obj(),
				st.MakeNode().Name("node2").Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
			resources: defaultResources,
		},
		{
			// CPU Score: ((4000 - 3000) *100) / 4000 = 25
@ -271,50 +249,86 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// CPU Score: ((6000 - 3000) *100) / 6000 = 50
			// Memory Score: ((10000 - 5000) *100) / 10000 = 50
			// Node2 Score: (50 * 1 + 50 * 2) / (1 + 2) = 50
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 2}, {Name: "cpu", Weight: 1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 41}, {Name: "machine2", Score: 50}},
			name: "nothing scheduled, resources requested with different weight on CPU and memory, differently sized machines",
			name: "nothing scheduled, resources requested with different weight on CPU and memory, differently sized machines",
			requestedPod: st.MakePod().Node("node1").
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 41}, {Name: "node2", Score: 50}},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: 2},
				{Name: "cpu", Weight: 1},
			},
		},
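With per-resource weights the average becomes weight-weighted, which is where node1's 41 above comes from. A runnable check with this case's values (integer division throughout; standalone arithmetic, not plugin code):

	package main

	import "fmt"

	func main() {
		cpuScore := int64(4000-3000) * 100 / 4000   // 25, weight 1
		memScore := int64(10000-5000) * 100 / 10000 // 50, weight 2
		fmt.Println((cpuScore*1 + memScore*2) / (1 + 2)) // 125/3 = 41
	}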
		{
			// resource with negative weight is not allowed
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine", 4000, 10000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: -1}, {Name: "cpu", Weight: 1}}},
			wantErr: field.ErrorList{
			name: "resource with negative weight",
			requestedPod: st.MakePod().Node("node1").
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: -1},
				{Name: "cpu", Weight: 1},
			},
			wantErrs: field.ErrorList{
				&field.Error{
					Type: field.ErrorTypeInvalid,
					Field: "resources[0].weight",
				},
			}.ToAggregate(),
			name: "resource with negative weight",
			},
		},
		{
			// resource with zero weight is not allowed
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine", 4000, 10000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 1}, {Name: "cpu", Weight: 0}}},
			wantErr: field.ErrorList{
			name: "resource with zero weight",
			requestedPod: st.MakePod().Node("node1").
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 41}, {Name: "node2", Score: 50}},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: 1},
				{Name: "cpu", Weight: 0},
			},
			wantErrs: field.ErrorList{
				&field.Error{
					Type: field.ErrorTypeInvalid,
					Field: "resources[1].weight",
				},
			}.ToAggregate(),
			name: "resource with zero weight",
			},
		},
		{
			// resource weight should be less than MaxNodeScore
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine", 4000, 10000)},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 120}}},
			wantErr: field.ErrorList{
			name: "resource weight larger than MaxNodeScore",
			requestedPod: st.MakePod().Node("node1").
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: 1},
				{Name: "cpu", Weight: 101},
			},
			wantErrs: field.ErrorList{
				&field.Error{
					Type: field.ErrorTypeInvalid,
					Field: "resources[0].weight",
					Field: "resources[1].weight",
				},
			}.ToAggregate(),
			name: "resource weight larger than MaxNodeScore",
			},
		},
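All three weight cases above exercise the same bounds check: validation walks the resource list and rejects weights outside [1, 100], producing the resources[i].weight field paths seen in the expected errors. An illustrative sketch of that kind of check (the real code lives in pkg/scheduler/apis/config/validation; this is not it):

	// validateResourceWeights is a sketch, not the actual validation code.
	func validateResourceWeights(resources []config.ResourceSpec) field.ErrorList {
		var allErrs field.ErrorList
		for i, r := range resources {
			if r.Weight <= 0 || r.Weight > 100 {
				path := field.NewPath("resources").Index(i).Child("weight")
				allErrs = append(allErrs, field.Invalid(path, r.Weight, "weight should be in range [1, 100]"))
			}
		}
		return allErrs
	}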
		{
			// Bypass extended resource if the pod does not request.
@ -323,14 +337,17 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// the final scores are:
			// - node1: (50 + 50) / 2 = 50
			// - node2: (50 + 50) / 2 = 50
			pod: &v1.Pod{Spec: cpuAndMemory},
			name: "bypass extended resource if the pod does not request",
			requestedPod: st.MakePod().Node("node1").
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				makeNode("machine1", 6000, 10000),
				makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 4}),
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "4"}).Obj(),
			},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: extendedResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 50}},
			name: "bypass extended resource if the pod does not request",
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 50}},
			resources: extendedResourceLeastAllocatedSet,
		},
		{
			// Honor extended resource if the pod requests.
@ -341,47 +358,52 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
			// So the final scores are:
			// - node1: (50 + 50 + 50) / 3 = 50
			// - node2: (50 + 50 + 80) / 3 = 60
			pod: &v1.Pod{Spec: cpuMemoryAndExtendedRes},
			name: "honor extended resource if the pod requests",
			requestedPod: st.MakePod().Node("node1").
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000", v1.ResourceName(extendedRes): "2"}).
				Obj(),
			nodes: []*v1.Node{
				makeNodeWithExtendedResource("machine1", 6000, 10000, map[string]int64{extendedRes: 4}),
				makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 10}),
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "4"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "10"}).Obj(),
			},
			args: config.NodeResourcesLeastAllocatedArgs{Resources: extendedResourceLeastAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 60}},
			name: "honor extended resource if the pod requests",
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 60}},
			resources: extendedResourceLeastAllocatedSet,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			snapshot := cache.NewSnapshot(test.pods, test.nodes)
			state := framework.NewCycleState()
			snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
			p, err := NewLeastAllocated(&test.args, fh, feature.Features{EnablePodOverhead: true})

			if test.wantErr != nil {
				if err != nil {
					diff := cmp.Diff(test.wantErr, err, cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail"))
					if diff != "" {
						t.Fatalf("got err (-want,+got):\n%s", diff)
					}
				} else {
					t.Fatalf("no error produced, wanted %v", test.wantErr)
				}
			p, err := NewFit(
				&config.NodeResourcesFitArgs{
					ScoringStrategy: &config.ScoringStrategy{
						Type: config.LeastAllocated,
						Resources: test.resources,
					},
				}, fh, plfeature.Features{EnablePodOverhead: true})

			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Fatalf("got err (-want,+got):\n%s", diff)
			}
			if err != nil {
				return
			}

			if err != nil && test.wantErr == nil {
				t.Fatalf("failed to initialize plugin NodeResourcesLeastAllocated, got error: %v", err)
			var gotScores framework.NodeScoreList
			for _, n := range test.nodes {
				score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.requestedPod, n.Name)
				if !status.IsSuccess() {
					t.Errorf("unexpected error: %v", status)
				}
				gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
			}

			for i := range test.nodes {
				hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
					t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
				}
			if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {
				t.Errorf("Unexpected scores (-want,+got):\n%s", diff)
			}
		})
	}
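The runner rewrite above is the template for the other two files in this commit: every removed plugin constructor is replaced by NewFit with a different ScoringStrategy.Type. Side by side, with resources and shape standing in for the per-test values (all three forms appear verbatim in the test bodies in this diff):

	least := &config.NodeResourcesFitArgs{
		ScoringStrategy: &config.ScoringStrategy{Type: config.LeastAllocated, Resources: resources},
	}
	most := &config.NodeResourcesFitArgs{
		ScoringStrategy: &config.ScoringStrategy{Type: config.MostAllocated, Resources: resources},
	}
	ratio := &config.NodeResourcesFitArgs{
		ScoringStrategy: &config.ScoringStrategy{
			Type: config.RequestedToCapacityRatio,
			Resources: resources,
			RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{Shape: shape},
		},
	}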

@ -17,79 +17,15 @@ limitations under the License.
package noderesources

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)

// MostAllocated is a score plugin that favors nodes with high allocation based on requested resources.
type MostAllocated struct {
	handle framework.Handle
	resourceAllocationScorer
}

var _ = framework.ScorePlugin(&MostAllocated{})

// MostAllocatedName is the name of the plugin used in the plugin registry and configurations.
const MostAllocatedName = names.NodeResourcesMostAllocated

// Name returns name of the plugin. It is used in logs, etc.
func (ma *MostAllocated) Name() string {
	return MostAllocatedName
}

// Score invoked at the Score extension point.
func (ma *MostAllocated) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
	nodeInfo, err := ma.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
	if err != nil {
		return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
	}

	// ma.score favors nodes with most requested resources.
	// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
	// based on the maximum of the average of the fraction of requested to capacity.
	// Details: (cpu(MaxNodeScore * sum(requested) / capacity) + memory(MaxNodeScore * sum(requested) / capacity)) / weightSum
	return ma.score(pod, nodeInfo)
}
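The Details formula above is the mirror image of least-allocated: requested over capacity rather than free over capacity. A runnable check for a 6000-CPU/10000-memory node scoring the cpuAndMemory pod used throughout (cpu 3000, memory 5000), with equal weights (standalone arithmetic, not the plugin's code):

	package main

	import "fmt"

	func main() {
		cpu := int64(3000) * 100 / 6000  // 50
		mem := int64(5000) * 100 / 10000 // 50
		fmt.Println((cpu*1 + mem*1) / (1 + 1)) // 50, matching the MostAllocated expectations below
	}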

// ScoreExtensions of the Score plugin.
func (ma *MostAllocated) ScoreExtensions() framework.ScoreExtensions {
	return nil
}

// NewMostAllocated initializes a new plugin and returns it.
func NewMostAllocated(maArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
	args, ok := maArgs.(*config.NodeResourcesMostAllocatedArgs)
	if !ok {
		return nil, fmt.Errorf("want args to be of type NodeResourcesMostAllocatedArgs, got %T", args)
	}
	if err := validation.ValidateNodeResourcesMostAllocatedArgs(nil, args); err != nil {
		return nil, err
	}

	resToWeightMap := make(resourceToWeightMap)
	for _, resource := range (*args).Resources {
		resToWeightMap[v1.ResourceName(resource.Name)] = resource.Weight
	}

	return &MostAllocated{
		handle: h,
		resourceAllocationScorer: resourceAllocationScorer{
			Name: MostAllocatedName,
			scorer: mostResourceScorer(resToWeightMap),
			resourceToWeightMap: resToWeightMap,
			enablePodOverhead: fts.EnablePodOverhead,
		},
	}, nil
}

// mostResourceScorer favors nodes with most requested resources.
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the maximum of the average of the fraction of requested to capacity.
//
// Details:
// (cpu(MaxNodeScore * sum(requested) / capacity) + memory(MaxNodeScore * sum(requested) / capacity)) / weightSum
func mostResourceScorer(resToWeightMap resourceToWeightMap) func(requested, allocable resourceToValueMap) int64 {
	return func(requested, allocable resourceToValueMap) int64 {
		var nodeScore, weightSum int64

@ -18,126 +18,39 @@ package noderesources

import (
	"context"
	"reflect"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestNodeResourcesMostAllocated(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	noResources := v1.PodSpec{
		Containers: []v1.Container{},
	}
	cpuOnly := v1.PodSpec{
		NodeName: "machine1",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			},
		},
	}
	cpuOnly2 := cpuOnly
	cpuOnly2.NodeName = "machine2"
	cpuAndMemory := v1.PodSpec{
		NodeName: "machine2",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("1000m"),
						v1.ResourceMemory: resource.MustParse("2000"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("3000"),
					},
				},
			},
		},
	}
	bigCPUAndMemory := v1.PodSpec{
		NodeName: "machine1",
		Containers: []v1.Container{
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("2000m"),
						v1.ResourceMemory: resource.MustParse("4000"),
					},
				},
			},
			{
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: resource.MustParse("3000m"),
						v1.ResourceMemory: resource.MustParse("5000"),
					},
				},
			},
		},
	}
	nonZeroContainer := v1.PodSpec{
		Containers: []v1.Container{{}},
	}
	nonZeroContainer1 := v1.PodSpec{
		NodeName: "machine1",
		Containers: []v1.Container{{}},
	}
	defaultResourceMostAllocatedSet := []config.ResourceSpec{
func TestMostAllocatedScoringStrategy(t *testing.T) {
	defaultResources := []config.ResourceSpec{
		{Name: string(v1.ResourceCPU), Weight: 1},
		{Name: string(v1.ResourceMemory), Weight: 1},
	}
	extendedRes := "abc.com/xyz"
	extendedResourceMostAllocatedSet := []config.ResourceSpec{
	extendedResourceLeastAllocatedSet := []config.ResourceSpec{
		{Name: string(v1.ResourceCPU), Weight: 1},
		{Name: string(v1.ResourceMemory), Weight: 1},
		{Name: extendedRes, Weight: 1},
	}
	cpuMemoryAndExtendedRes := *cpuAndMemory.DeepCopy()
	cpuMemoryAndExtendedRes.Containers[0].Resources.Requests[v1.ResourceName(extendedRes)] = resource.MustParse("2")

	tests := []struct {
		pod *v1.Pod
		pods []*v1.Pod
		nodes []*v1.Node
		args config.NodeResourcesMostAllocatedArgs
		wantErr error
		expectedList framework.NodeScoreList
		name string
		name string
		requestedPod *v1.Pod
		nodes []*v1.Node
		existingPods []*v1.Pod
		expectedScores framework.NodeScoreList
		resources []config.ResourceSpec
		wantErrs field.ErrorList
	}{
		{
			// Node1 scores (used resources) on 0-MaxNodeScore scale
@ -148,11 +61,15 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// CPU Score: (0 * MaxNodeScore) / 4000 = 0
			// Memory Score: (0 * MaxNodeScore) / 10000 = 0
			// Node2 Score: (0 + 0) / 2 = 0
			pod: &v1.Pod{Spec: noResources},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: defaultResourceMostAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name: "nothing scheduled, nothing requested",
			requestedPod: st.MakePod().Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
			resources: defaultResources,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -163,18 +80,32 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// CPU Score: (3000 * MaxNodeScore) / 6000 = 50
			// Memory Score: (5000 * MaxNodeScore) / 10000 = 50
			// Node2 Score: (50 + 50) / 2 = 50
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: defaultResourceMostAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 62}, {Name: "machine2", Score: 50}},
			name: "nothing scheduled, resources requested, differently sized machines",
			name: "nothing scheduled, resources requested, differently sized machines",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 62}, {Name: "node2", Score: 50}},
			resources: defaultResources,
		},
		{
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: []config.ResourceSpec{}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name: "Resources not set, nothing scheduled, resources requested, differently sized machines",
			name: "Resources not set, nothing scheduled, resources requested, differently sized machines",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MinNodeScore}, {Name: "node2", Score: framework.MinNodeScore}},
			resources: nil,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -185,17 +116,20 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// CPU Score: (6000 * MaxNodeScore) / 10000 = 60
			// Memory Score: (5000 * MaxNodeScore) / 20000 = 25
			// Node2 Score: (60 + 25) / 2 = 42
			pod: &v1.Pod{Spec: noResources},
			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: defaultResourceMostAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 30}, {Name: "machine2", Score: 42}},
			name: "no resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: cpuOnly, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			requestedPod: st.MakePod().Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 30}, {Name: "node2", Score: 42}},
			resources: defaultResources,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -206,15 +140,21 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// CPU Score: (6000 * MaxNodeScore) / 10000 = 60
			// Memory Score: (10000 * MaxNodeScore) / 20000 = 50
			// Node2 Score: (60 + 50) / 2 = 55
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: defaultResourceMostAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 42}, {Name: "machine2", Score: 55}},
			name: "resources requested, pods scheduled with resources",
			pods: []*v1.Pod{
				{Spec: cpuOnly},
				{Spec: cpuAndMemory},
			name: "resources requested, pods scheduled with resources",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "20000"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "0"}).Obj(),
				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 42}, {Name: "node2", Score: 55}},
			resources: defaultResources,
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
@ -225,11 +165,18 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// CPU Score: (5000 * MaxNodeScore) / 10000 = 50
			// Memory Score: (9000 * MaxNodeScore) / 9000 = 100
			// Node2 Score: (50 + 100) / 2 = 75
			pod: &v1.Pod{Spec: bigCPUAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 5000, 10000), makeNode("machine2", 10000, 9000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: defaultResourceMostAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 95}, {Name: "machine2", Score: 75}},
			name: "resources requested equal node capacity",
			name: "resources requested equal node capacity",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "5000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "10000", "memory": "9000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 95}, {Name: "node2", Score: 75}},
			resources: defaultResources,
		},
		{
			// CPU Score: (3000 *100) / 4000 = 75
@ -238,69 +185,105 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// CPU Score: (3000 *100) / 6000 = 50
			// Memory Score: (5000 *100) / 10000 = 50
			// Node2 Score: (50 * 1 + 50 * 2) / (1 + 2) = 50
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 2}, {Name: "cpu", Weight: 1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 58}, {Name: "machine2", Score: 50}},
			name: "nothing scheduled, resources requested, differently sized machines",
			name: "nothing scheduled, resources requested, differently sized machines",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
			},
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 58}, {Name: "node2", Score: 50}},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: 2},
				{Name: "cpu", Weight: 1},
			},
		},
		{
			// Node1 scores on 0-MaxNodeScore scale
			// CPU Fraction: 300 / 250 = 100%
			// Memory Fraction: 600 / 10000 = 60%
			// Memory Fraction: 600 / 1000 = 60%
			// Node1 Score: (100 + 60) / 2 = 80
			// Node2 scores on 0-MaxNodeScore scale
			// CPU Fraction: 100 / 250 = 40%
			// Memory Fraction: 200 / 10000 = 20%
			// Memory Fraction: 200 / 1000 = 20%
			// Node2 Score: (20 + 40) / 2 = 30
			pod: &v1.Pod{Spec: nonZeroContainer},
			nodes: []*v1.Node{makeNode("machine1", 250, 1000*1024*1024), makeNode("machine2", 250, 1000*1024*1024)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 1}, {Name: "cpu", Weight: 1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 80}, {Name: "machine2", Score: 30}},
			name: "no resources requested, pods scheduled",
			pods: []*v1.Pod{
				{Spec: nonZeroContainer1},
				{Spec: nonZeroContainer1},
			name: "no resources requested, pods scheduled, nonzero request for resource",
			requestedPod: st.MakePod().Container("container").Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "250m", "memory": "1000Mi"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "250m", "memory": "1000Mi"}).Obj(),
			},
			existingPods: []*v1.Pod{
				st.MakePod().Node("node1").Container("container").Obj(),
				st.MakePod().Node("node1").Container("container").Obj(),
			},
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 80}, {Name: "node2", Score: 30}},
			resources: defaultResources,
		},
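The 300/250 and 600/1000 fractions in the comments above come from the scheduler's defaults for containers that declare no requests, historically 100m CPU and 200Mi memory per pod (schedutil.DefaultMilliCPURequest / DefaultMemoryRequest); with the scored pod plus the two existing pods, node1 carries three such pods. A cross-check, assuming those defaults:

	package main

	import "fmt"

	func main() {
		const defaultMilliCPU = 100     // 100m per request-less pod (assumed default)
		const defaultMemory = 200 << 20 // 200Mi per request-less pod (assumed default)
		pods := int64(3)                // scored pod + two existing pods on node1
		fmt.Println(pods * defaultMilliCPU * 100 / 250)        // 120 -> capped at 100%
		fmt.Println(pods * defaultMemory * 100 / (1000 << 20)) // 60%
	}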
		{
			// resource with negative weight is not allowed
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine", 4000, 10000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: -1}, {Name: "cpu", Weight: 1}}},
			wantErr: field.ErrorList{
			name: "resource with negative weight",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: -1},
				{Name: "cpu", Weight: 1},
			},
			wantErrs: field.ErrorList{
				&field.Error{
					Type: field.ErrorTypeInvalid,
					Field: "resources[0].weight",
				},
			}.ToAggregate(),
			name: "resource with negative weight",
			},
		},
		{
			// resource with zero weight is not allowed
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine", 4000, 10000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 1}, {Name: "cpu", Weight: 0}}},
			wantErr: field.ErrorList{
			name: "resource with zero weight",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: 1},
				{Name: "cpu", Weight: 0},
			},
			wantErrs: field.ErrorList{
				&field.Error{
					Type: field.ErrorTypeInvalid,
					Field: "resources[1].weight",
				},
			}.ToAggregate(),
			name: "resource with zero weight",
			},
		},
		{
			// resource weight should be less than MaxNodeScore
			pod: &v1.Pod{Spec: cpuAndMemory},
			nodes: []*v1.Node{makeNode("machine", 4000, 10000)},
			args: config.NodeResourcesMostAllocatedArgs{Resources: []config.ResourceSpec{{Name: "memory", Weight: 120}}},
			wantErr: field.ErrorList{
			name: "resource weight larger than MaxNodeScore",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
			},
			resources: []config.ResourceSpec{
				{Name: "memory", Weight: 101},
			},
			wantErrs: field.ErrorList{
				&field.Error{
					Type: field.ErrorTypeInvalid,
					Field: "resources[0].weight",
				},
			}.ToAggregate(),
			name: "resource weight larger than MaxNodeScore",
			},
		},
		{
			// Bypass extended resource if the pod does not request.
@ -309,14 +292,18 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// the final scores are:
			// - node1: (50 + 50) / 2 = 50
			// - node2: (50 + 50) / 2 = 50
			pod: &v1.Pod{Spec: cpuAndMemory},
			name: "bypass extended resource if the pod does not request",
			requestedPod: st.MakePod().
				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
				Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
				Obj(),
			nodes: []*v1.Node{
				makeNode("machine1", 6000, 10000),
				makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 4}),
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "4"}).Obj(),
			},
			args: config.NodeResourcesMostAllocatedArgs{Resources: extendedResourceMostAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 50}},
			name: "bypass extended resource if the pod does not request",
			resources: extendedResourceLeastAllocatedSet,
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 50}},
		},
		{
			// Honor extended resource if the pod requests.
@ -327,47 +314,52 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
			// So the final scores are:
			// - node1: (50 + 50 + 50) / 3 = 50
			// - node2: (50 + 50 + 20) / 3 = 40
			pod: &v1.Pod{Spec: cpuMemoryAndExtendedRes},
			name: "honor extended resource if the pod requests",
			requestedPod: st.MakePod().Node("node1").
				Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000", v1.ResourceName(extendedRes): "2"}).
				Obj(),
			nodes: []*v1.Node{
				makeNodeWithExtendedResource("machine1", 6000, 10000, map[string]int64{extendedRes: 4}),
				makeNodeWithExtendedResource("machine2", 6000, 10000, map[string]int64{extendedRes: 10}),
				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "4"}).Obj(),
				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000", v1.ResourceName(extendedRes): "10"}).Obj(),
			},
			args: config.NodeResourcesMostAllocatedArgs{Resources: extendedResourceMostAllocatedSet},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 40}},
			name: "honor extended resource if the pod requests",
			resources: extendedResourceLeastAllocatedSet,
			existingPods: nil,
			expectedScores: []framework.NodeScore{{Name: "node1", Score: 50}, {Name: "node2", Score: 40}},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			snapshot := cache.NewSnapshot(test.pods, test.nodes)
			state := framework.NewCycleState()
			snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
			p, err := NewMostAllocated(&test.args, fh, feature.Features{EnablePodOverhead: true})

			if test.wantErr != nil {
				if err != nil {
					diff := cmp.Diff(test.wantErr, err, cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail"))
					if diff != "" {
						t.Fatalf("got err (-want,+got):\n%s", diff)
					}
				} else {
					t.Fatalf("no error produced, wanted %v", test.wantErr)
				}
			p, err := NewFit(
				&config.NodeResourcesFitArgs{
					ScoringStrategy: &config.ScoringStrategy{
						Type: config.MostAllocated,
						Resources: test.resources,
					},
				}, fh, plfeature.Features{EnablePodOverhead: true})

			if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
				t.Fatalf("got err (-want,+got):\n%s", diff)
			}
			if err != nil {
				return
			}

			if err != nil && test.wantErr == nil {
				t.Fatalf("failed to initialize plugin NodeResourcesMostAllocated, got error: %v", err)
			var gotScores framework.NodeScoreList
			for _, n := range test.nodes {
				score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.requestedPod, n.Name)
				if !status.IsSuccess() {
					t.Errorf("unexpected error: %v", status)
				}
				gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
			}

			for i := range test.nodes {
				hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
				if err != nil {
					t.Errorf("unexpected error: %v", err)
				}
				if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
					t.Errorf("got score %v for host %v, expected %v", hostResult, test.nodes[i].Name, test.expectedList[i].Score)
				}
			if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {
				t.Errorf("Unexpected scores (-want,+got):\n%s", diff)
			}
		})
	}

@ -17,86 +17,19 @@ limitations under the License.
package noderesources

import (
	"context"
	"fmt"
	"math"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)

const (
	// RequestedToCapacityRatioName is the name of this plugin.
	RequestedToCapacityRatioName = names.RequestedToCapacityRatio
	maxUtilization = 100
	maxUtilization = 100
)

// NewRequestedToCapacityRatio initializes a new plugin and returns it.
func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
	args, err := getRequestedToCapacityRatioArgs(plArgs)
	if err != nil {
		return nil, err
	}

	if err := validation.ValidateRequestedToCapacityRatioArgs(nil, &args); err != nil {
		return nil, err
	}

	resourceToWeightMap := resourcesToWeightMap(args.Resources)

	return &RequestedToCapacityRatio{
		handle: handle,
		resourceAllocationScorer: resourceAllocationScorer{
			Name: RequestedToCapacityRatioName,
			scorer: requestedToCapacityRatioScorer(resourceToWeightMap, args.Shape),
			resourceToWeightMap: resourceToWeightMap,
			enablePodOverhead: fts.EnablePodOverhead,
		},
	}, nil
}

func getRequestedToCapacityRatioArgs(obj runtime.Object) (config.RequestedToCapacityRatioArgs, error) {
	ptr, ok := obj.(*config.RequestedToCapacityRatioArgs)
	if !ok {
		return config.RequestedToCapacityRatioArgs{}, fmt.Errorf("want args to be of type RequestedToCapacityRatioArgs, got %T", obj)
	}
	return *ptr, nil
}

// RequestedToCapacityRatio is a score plugin that allow users to apply bin packing
// buildRequestedToCapacityRatioScorerFunction allows users to apply bin packing
// on core resources like CPU, Memory as well as extended resources like accelerators.
type RequestedToCapacityRatio struct {
	handle framework.Handle
	resourceAllocationScorer
}

var _ framework.ScorePlugin = &RequestedToCapacityRatio{}

// Name returns name of the plugin. It is used in logs, etc.
func (pl *RequestedToCapacityRatio) Name() string {
	return RequestedToCapacityRatioName
}

// Score invoked at the score extension point.
func (pl *RequestedToCapacityRatio) Score(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
	nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
	if err != nil {
		return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
	}
	return pl.score(pod, nodeInfo)
}

// ScoreExtensions of the Score plugin.
func (pl *RequestedToCapacityRatio) ScoreExtensions() framework.ScoreExtensions {
	return nil
}
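The scorer built below interpolates linearly between the configured shape points ("broken linear"). A sketch of that interpolation, assuming utilization-sorted points (the real helper is BuildBrokenLinearFunction; this is illustrative, not its implementation):

	type point struct{ utilization, score int64 }

	// brokenLinear returns a piecewise-linear function through the given points,
	// clamped to the first/last score outside their utilization range.
	func brokenLinear(shape []point) func(int64) int64 {
		return func(u int64) int64 {
			if u <= shape[0].utilization {
				return shape[0].score
			}
			for i := 1; i < len(shape); i++ {
				if u <= shape[i].utilization {
					p0, p1 := shape[i-1], shape[i]
					return p0.score + (p1.score-p0.score)*(u-p0.utilization)/(p1.utilization-p0.utilization)
				}
			}
			return shape[len(shape)-1].score
		}
	}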

func buildRequestedToCapacityRatioScorerFunction(scoringFunctionShape helper.FunctionShape, resourceToWeightMap resourceToWeightMap) func(resourceToValueMap, resourceToValueMap) int64 {
	rawScoringFunction := helper.BuildBrokenLinearFunction(scoringFunctionShape)
	resourceScoringFunction := func(requested, capacity int64) int64 {

@ -19,107 +19,133 @@ package noderesources
import (
	"context"
	"fmt"
	"reflect"
	"testing"

	"github.com/google/go-cmp/cmp"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/kubernetes/pkg/scheduler/apis/config"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func TestRequestedToCapacityRatio(t *testing.T) {
	type test struct {
		name string
		requestedPod *v1.Pod
		nodes []*v1.Node
		scheduledPods []*v1.Pod
		expectedPriorities framework.NodeScoreList
func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) {
	defaultResources := []config.ResourceSpec{
		{Name: string(v1.ResourceCPU), Weight: 1},
		{Name: string(v1.ResourceMemory), Weight: 1},
	}

	tests := []test{
	shape := []config.UtilizationShapePoint{
		{Utilization: 0, Score: 10},
		{Utilization: 100, Score: 0},
	}
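With this shape the raw score falls linearly from 10 at 0% utilization to 0 at 100%, and shape scores are rescaled to the 0-100 node score range, so the strategy behaves like 100 - utilization here. A check for the second case below, where node1 ends up 75% utilized on CPU and 50% on memory (exact rounding is internal to the plugin):

	package main

	import "fmt"

	func main() {
		cpu := int64(100) - 3000*100/4000  // 25
		mem := int64(100) - 5000*100/10000 // 50
		fmt.Println(cpu, mem) // averaging gives node1's expected 38 (with rounding); node2 is 50% on both and scores 50
	}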
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
requestedPod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
existingPods []*v1.Pod
|
||||
expectedScores framework.NodeScoreList
|
||||
resources []config.ResourceSpec
|
||||
shape []config.UtilizationShapePoint
|
||||
wantErrs field.ErrorList
|
||||
}{
|
||||
{
|
||||
name: "nothing scheduled, nothing requested (default - least requested nodes have priority)",
|
||||
requestedPod: makePod("", 0, 0),
|
||||
nodes: []*v1.Node{makeNode("node1", 4000, 10000), makeNode("node2", 4000, 10000)},
|
||||
scheduledPods: []*v1.Pod{makePod("node1", 0, 0), makePod("node2", 0, 0)},
|
||||
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 100}, {Name: "node2", Score: 100}},
|
||||
name: "nothing scheduled, nothing requested (default - least requested nodes have priority)",
|
||||
requestedPod: st.MakePod().Obj(),
|
||||
nodes: []*v1.Node{
|
||||
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
|
||||
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
|
||||
},
|
||||
existingPods: []*v1.Pod{
|
||||
st.MakePod().Node("node1").Obj(),
|
||||
st.MakePod().Node("node1").Obj(),
|
||||
},
|
||||
expectedScores: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}},
|
||||
resources: defaultResources,
|
||||
shape: shape,
|
||||
},
|
||||
{
|
||||
name: "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)",
|
||||
requestedPod: makePod("", 3000, 5000),
|
||||
nodes: []*v1.Node{makeNode("node1", 4000, 10000), makeNode("node2", 6000, 10000)},
|
||||
scheduledPods: []*v1.Pod{makePod("node1", 0, 0), makePod("node2", 0, 0)},
|
||||
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
|
||||
name: "nothing scheduled, resources requested, differently sized machines (default - least requested nodes have priority)",
|
||||
requestedPod: st.MakePod().
|
||||
Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
|
||||
Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "3000"}).
|
||||
Obj(),
|
||||
nodes: []*v1.Node{
|
||||
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
|
||||
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
|
||||
},
|
||||
existingPods: []*v1.Pod{
|
||||
st.MakePod().Node("node1").Obj(),
|
||||
st.MakePod().Node("node1").Obj(),
|
||||
},
|
||||
expectedScores: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
|
||||
resources: defaultResources,
|
||||
shape: shape,
|
||||
},
|
||||
{
|
||||
name: "no resources requested, pods scheduled with resources (default - least requested nodes have priority)",
|
||||
requestedPod: makePod("", 0, 0),
|
||||
nodes: []*v1.Node{makeNode("node1", 4000, 10000), makeNode("node2", 6000, 10000)},
|
||||
scheduledPods: []*v1.Pod{makePod("node1", 3000, 5000), makePod("node2", 3000, 5000)},
|
||||
expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
|
||||
name: "no resources requested, pods scheduled with resources (default - least requested nodes have priority)",
|
||||
requestedPod: st.MakePod().Obj(),
|
||||
nodes: []*v1.Node{
|
||||
st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
|
||||
st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(),
|
||||
},
|
||||
existingPods: []*v1.Pod{
|
||||
st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
|
||||
st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).Obj(),
|
||||
},
|
||||
expectedScores: []framework.NodeScore{{Name: "node1", Score: 38}, {Name: "node2", Score: 50}},
|
||||
resources: defaultResources,
|
||||
shape: shape,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
state := framework.NewCycleState()
|
||||
snapshot := cache.NewSnapshot(test.scheduledPods, test.nodes)
|
||||
snapshot := cache.NewSnapshot(test.existingPods, test.nodes)
|
||||
fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
|
||||
args := config.RequestedToCapacityRatioArgs{
|
||||
Shape: []config.UtilizationShapePoint{
|
||||
{Utilization: 0, Score: 10},
|
||||
{Utilization: 100, Score: 0},
|
||||
},
|
||||
Resources: []config.ResourceSpec{
|
||||
{Name: "memory", Weight: 1},
|
||||
{Name: "cpu", Weight: 1},
|
||||
|
||||
p, err := NewFit(&config.NodeResourcesFitArgs{
|
||||
ScoringStrategy: &config.ScoringStrategy{
|
||||
Type: config.RequestedToCapacityRatio,
|
||||
Resources: test.resources,
|
||||
RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
|
||||
Shape: shape,
|
||||
},
|
||||
},
|
||||
}, fh, plfeature.Features{EnablePodOverhead: true})
|
||||
|
||||
if diff := cmp.Diff(test.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
|
||||
t.Fatalf("got err (-want,+got):\n%s", diff)
|
||||
}
|
||||
p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
var gotPriorities framework.NodeScoreList
|
||||
var gotScores framework.NodeScoreList
|
||||
for _, n := range test.nodes {
|
||||
score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.requestedPod, n.Name)
|
||||
if !status.IsSuccess() {
|
||||
t.Errorf("unexpected error: %v", status)
|
||||
}
|
||||
gotPriorities = append(gotPriorities, framework.NodeScore{Name: n.Name, Score: score})
|
||||
gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(test.expectedPriorities, gotPriorities) {
|
||||
t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedPriorities, gotPriorities)
|
||||
if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {
|
||||
t.Errorf("Unexpected nodes (-want,+got):\n%s", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
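
The 38/50 expectations above follow from the shape {0: 10, 100: 0}: each resource's utilization is mapped through a broken-linear curve (shape scores are scaled from the 0-10 range onto 0-100), and the node score is the weight-averaged, rounded result. A minimal standalone sketch that reproduces those numbers — the names here are illustrative, not the plugin's internal API:

package main

import (
	"fmt"
	"math"
)

// point mirrors config.UtilizationShapePoint after the 0-10 shape scores
// are scaled onto the 0-100 node score range (so Score 10 becomes 100).
type point struct{ utilization, score int64 }

// brokenLinear interpolates between shape points with integer math and
// clamps outside the defined range — the "broken linear" idea these tests
// exercise.
func brokenLinear(shape []point) func(int64) int64 {
	return func(u int64) int64 {
		if u <= shape[0].utilization {
			return shape[0].score
		}
		for i := 1; i < len(shape); i++ {
			if u <= shape[i].utilization {
				p0, p1 := shape[i-1], shape[i]
				return p0.score + (p1.score-p0.score)*(u-p0.utilization)/(p1.utilization-p0.utilization)
			}
		}
		return shape[len(shape)-1].score
	}
}

func main() {
	// Shape {0: 10, 100: 0} from the test, scaled by 10.
	f := brokenLinear([]point{{0, 100}, {100, 0}})

	// "differently sized machines" case: the pod requests cpu=3000, memory=5000.
	// node1: cpu 3000/4000 = 75% utilized, memory 5000/10000 = 50%.
	cpu, mem := f(75), f(50) // 25, 50
	// cpu and memory carry equal weight; the final node score is rounded.
	node1 := math.Round(float64(cpu+mem) / 2) // 38

	// node2: cpu 3000/6000 = 50%, memory 50%, so f(50) = 50 for both.
	node2 := math.Round(float64(f(50)+f(50)) / 2) // 50

	fmt.Println(node1, node2) // 38 50
}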

func makePod(node string, milliCPU, memory int64) *v1.Pod {
	return &v1.Pod{
		Spec: v1.PodSpec{
			NodeName: node,
			Containers: []v1.Container{
				{
					Resources: v1.ResourceRequirements{
						Requests: v1.ResourceList{
							v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
							v1.ResourceMemory: *resource.NewQuantity(memory, resource.DecimalSI),
						},
					},
				},
			},
		},
	}
}
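
The tests above migrate from this makePod helper to the st.MakePod builder. For scoring purposes the two are interchangeable: the builder parses quantities with resource.MustParse, so a plain "3000" means 3000 CPUs rather than 3000 milliCPU, but the new tests parse node capacities the same way, and the scorer only looks at request-to-capacity ratios. A hedged sketch of the builder form, assuming st aliases the scheduler testing package used in the diff:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func main() {
	// Builder-style counterpart of makePod("node1", 3000, 5000). Req appends
	// a container with the given requests, exactly as used in the tests above.
	pod := st.MakePod().
		Node("node1").
		Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}).
		Obj()
	fmt.Println(pod.Spec.NodeName, pod.Spec.Containers[0].Resources.Requests)
}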

func TestBrokenLinearFunction(t *testing.T) {
	type Assertion struct {
		p int64
@ -195,9 +221,6 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
		"intel.com/foo": 8,
	}

	noResources := v1.PodSpec{
		Containers: []v1.Container{},
	}
	extendedResourcePod1 := v1.PodSpec{
		Containers: []v1.Container{
			{
@ -223,18 +246,18 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
	machine2Pod := extendedResourcePod1
	machine2Pod.NodeName = "machine2"
	tests := []struct {
		pod          *v1.Pod
		pods         []*v1.Pod
		nodes        []*v1.Node
		expectedList framework.NodeScoreList
		name         string
		pod            *v1.Pod
		pods           []*v1.Pod
		nodes          []*v1.Node
		expectedScores framework.NodeScoreList
		name           string
	}{
		{
			// Node1 Score = Node2 Score = 0 as the incoming Pod doesn't request extended resource.
			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "nothing scheduled, nothing requested",
			pod:            st.MakePod().Obj(),
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:           "nothing scheduled, nothing requested",
		},
		{
			// Node1 scores (used resources) on 0-MaxNodeScore scale
@ -248,12 +271,12 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
			// resourceScoringFunction((0+2),4)
			// = 2/4 * maxUtilization = 50 = rawScoringFunction(50)
			// Node2 Score: 5
			pod:          &v1.Pod{Spec: extendedResourcePod1},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
			name:         "resources requested, pods scheduled with less resources",
			pod:            &v1.Pod{Spec: extendedResourcePod1},
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 5}},
			name:           "resources requested, pods scheduled with less resources",
			pods: []*v1.Pod{
				{Spec: noResources},
				st.MakePod().Obj(),
			},
		},
		{
@ -268,10 +291,10 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
			// resourceScoringFunction((2+2),4)
			// = 4/4 * maxUtilization = maxUtilization = rawScoringFunction(maxUtilization)
			// Node2 Score: 10
			pod:          &v1.Pod{Spec: extendedResourcePod1},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
			name:         "resources requested, pods scheduled with resources, on node with existing pod running ",
			pod:            &v1.Pod{Spec: extendedResourcePod1},
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 2}, {Name: "machine2", Score: 10}},
			name:           "resources requested, pods scheduled with resources, on node with existing pod running ",
			pods: []*v1.Pod{
				{Spec: machine2Pod},
			},
@ -288,12 +311,12 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
			// resourceScoringFunction((0+4),4)
			// = 4/4 * maxUtilization = maxUtilization = rawScoringFunction(maxUtilization)
			// Node2 Score: 10
			pod:          &v1.Pod{Spec: extendedResourcePod2},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
			name:         "resources requested, pods scheduled with more resources",
			pod:            &v1.Pod{Spec: extendedResourcePod2},
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResource2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResource1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 10}},
			name:           "resources requested, pods scheduled with more resources",
			pods: []*v1.Pod{
				{Spec: noResources},
				st.MakePod().Obj(),
			},
		},
	}
@ -303,16 +326,21 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
			state := framework.NewCycleState()
			snapshot := cache.NewSnapshot(test.pods, test.nodes)
			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
			args := config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 0, Score: 0},
					{Utilization: 100, Score: 1},
				},
				Resources: []config.ResourceSpec{
					{Name: "intel.com/foo", Weight: 1},
			args := config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: []config.ResourceSpec{
						{Name: "intel.com/foo", Weight: 1},
					},
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 0},
							{Utilization: 100, Score: 1},
						},
					},
				},
			}
			p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
			p, err := NewFit(&args, fh, feature.Features{EnablePodOverhead: true})
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
@ -326,8 +354,8 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
				gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score})
			}

			if !reflect.DeepEqual(test.expectedList, gotList) {
				t.Errorf("expected %#v, got %#v", test.expectedList, gotList)
			if diff := cmp.Diff(test.expectedScores, gotList); diff != "" {
				t.Errorf("Unexpected nodescore list (-want,+got):\n%s", diff)
			}
		})
	}
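
The per-node expectations trace directly through the arithmetic in the comments: with shape {0: 0, 100: 1} scaled onto 0-10 and integer interpolation, a 25%-utilized node scores 2 and a 50%-utilized node scores 5. A quick standalone check (helper names are illustrative):

package main

import "fmt"

func main() {
	// Linear from (0, 0) to (100, 10) with integer division, mirroring the
	// rawScoringFunction arithmetic quoted in the comments above.
	rawScore := func(u int64) int64 {
		return 10 * u / 100
	}

	// extendedResourcePod1 requests 2 of intel.com/foo.
	// machine1 has capacity 8: utilization (0+2)/8 = 25%.
	fmt.Println(rawScore(25)) // 2
	// machine2 has capacity 4: utilization (0+2)/4 = 50%.
	fmt.Println(rawScore(50)) // 5
}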

@ -346,9 +374,6 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
		"intel.com/bar": 4,
	}

	noResources := v1.PodSpec{
		Containers: []v1.Container{},
	}
	extnededResourcePod1 := v1.PodSpec{
		Containers: []v1.Container{
			{
@ -376,11 +401,11 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
	machine2Pod := extnededResourcePod1
	machine2Pod.NodeName = "machine2"
	tests := []struct {
		pod          *v1.Pod
		pods         []*v1.Pod
		nodes        []*v1.Node
		expectedList framework.NodeScoreList
		name         string
		pod            *v1.Pod
		pods           []*v1.Pod
		nodes          []*v1.Node
		expectedScores framework.NodeScoreList
		name           string
	}{
		{

@ -410,10 +435,10 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
			// = 0/8 * 100 = 0 = rawScoringFunction(0)
			// Node2 Score: (0 * 3) + (0 * 5) / 8 = 0

			pod:          &v1.Pod{Spec: noResources},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "nothing scheduled, nothing requested",
			pod:            st.MakePod().Obj(),
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:           "nothing scheduled, nothing requested",
		},

		{
@ -444,12 +469,12 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
			// = 2/8 * 100 = 25 = rawScoringFunction(25)
			// Node2 Score: (5 * 3) + (2 * 5) / 8 = 3

			pod:          &v1.Pod{Spec: extnededResourcePod1},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 3}},
			name:         "resources requested, pods scheduled with less resources",
			pod:            &v1.Pod{Spec: extnededResourcePod1},
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 3}},
			name:           "resources requested, pods scheduled with less resources",
			pods: []*v1.Pod{
				{Spec: noResources},
				st.MakePod().Obj(),
			},
		},

@ -480,10 +505,10 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
			// = 4/8 *100 = 50 = rawScoringFunction(50)
			// Node2 Score: (10 * 3) + (5 * 5) / 8 = 7

			pod:          &v1.Pod{Spec: extnededResourcePod1},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 7}},
			name:         "resources requested, pods scheduled with resources, on node with existing pod running ",
			pod:            &v1.Pod{Spec: extnededResourcePod1},
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 4}, {Name: "machine2", Score: 7}},
			name:           "resources requested, pods scheduled with resources, on node with existing pod running ",
			pods: []*v1.Pod{
				{Spec: machine2Pod},
			},
@ -531,12 +556,12 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
			// = 2/8 * 100 = 25 = rawScoringFunction(25)
			// Node2 Score: (10 * 3) + (2 * 5) / 8 = 5

			pod:          &v1.Pod{Spec: extnededResourcePod2},
			nodes:        []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 5}},
			name:         "resources requested, pods scheduled with more resources",
			pod:            &v1.Pod{Spec: extnededResourcePod2},
			nodes:          []*v1.Node{makeNodeWithExtendedResource("machine1", 4000, 10000*1024*1024, extendedResources2), makeNodeWithExtendedResource("machine2", 4000, 10000*1024*1024, extendedResources1)},
			expectedScores: []framework.NodeScore{{Name: "machine1", Score: 5}, {Name: "machine2", Score: 5}},
			name:           "resources requested, pods scheduled with more resources",
			pods: []*v1.Pod{
				{Spec: noResources},
				st.MakePod().Obj(),
			},
		},
	}
@ -546,32 +571,39 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
			state := framework.NewCycleState()
			snapshot := cache.NewSnapshot(test.pods, test.nodes)
			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
			args := config.RequestedToCapacityRatioArgs{
				Shape: []config.UtilizationShapePoint{
					{Utilization: 0, Score: 0},
					{Utilization: 100, Score: 1},
				},
				Resources: []config.ResourceSpec{
					{Name: "intel.com/foo", Weight: 3},
					{Name: "intel.com/bar", Weight: 5},

			args := config.NodeResourcesFitArgs{
				ScoringStrategy: &config.ScoringStrategy{
					Type: config.RequestedToCapacityRatio,
					Resources: []config.ResourceSpec{
						{Name: "intel.com/foo", Weight: 3},
						{Name: "intel.com/bar", Weight: 5},
					},
					RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{
						Shape: []config.UtilizationShapePoint{
							{Utilization: 0, Score: 0},
							{Utilization: 100, Score: 1},
						},
					},
				},
			}
			p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})

			p, err := NewFit(&args, fh, feature.Features{EnablePodOverhead: true})
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			var gotList framework.NodeScoreList
			var gotScores framework.NodeScoreList
			for _, n := range test.nodes {
				score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, n.Name)
				if !status.IsSuccess() {
					t.Errorf("unexpected error: %v", status)
				}
				gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score})
				gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score})
			}

			if !reflect.DeepEqual(test.expectedList, gotList) {
				t.Errorf("expected %#v, got %#v", test.expectedList, gotList)
			if diff := cmp.Diff(test.expectedScores, gotScores); diff != "" {
				t.Errorf("Unexpected nodescore list (-want,+got):\n%s", diff)
			}
		})
	}
}
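
With multiple weighted resources, the per-resource raw scores are combined as a weighted average and rounded, exactly as the comments above spell out. A quick standalone check of the (5*3 + 2*5)/8 = 3 case, with values taken from those comments (the variable names are illustrative):

package main

import (
	"fmt"
	"math"
)

func main() {
	// From the "less resources" case above: on machine2, intel.com/foo
	// scores 5 and intel.com/bar scores 2; the args weight foo=3, bar=5.
	fooScore, fooWeight := int64(5), int64(3)
	barScore, barWeight := int64(2), int64(5)

	// Weighted average, rounded: (5*3 + 2*5) / (3+5) = 25/8 = 3.125 -> 3.
	node2 := math.Round(float64(fooScore*fooWeight+barScore*barWeight) /
		float64(fooWeight+barWeight))
	fmt.Println(node2) // 3
}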

@ -17,9 +17,15 @@ limitations under the License.
package noderesources

import (
	"github.com/google/go-cmp/cmp/cmpopts"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

var (
	ignoreBadValueDetail = cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail")
)
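
ignoreBadValueDetail is shared by the validation assertions in the tests above so that cmp.Diff compares only the stable parts of a field.Error. A minimal standalone illustration — the error values here are made up for the example:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	ignore := cmpopts.IgnoreFields(field.Error{}, "BadValue", "Detail")

	want := field.Invalid(field.NewPath("scoringStrategy", "resources"), 0, "weight out of range")
	got := field.Invalid(field.NewPath("scoringStrategy", "resources"), 101, "resource weight of cpu not in valid range [1, 100]")

	// With BadValue and Detail ignored, only Type and Field are compared,
	// so these two errors diff as equal and the output is empty.
	fmt.Println(cmp.Diff(want, got, ignore))
}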

func makeNode(node string, milliCPU, memory int64) *v1.Node {

@ -54,30 +54,27 @@ func NewInTreeRegistry() runtime.Registry {
	}

	return runtime.Registry{
		selectorspread.Name:                  selectorspread.New,
		imagelocality.Name:                   imagelocality.New,
		tainttoleration.Name:                 tainttoleration.New,
		nodename.Name:                        nodename.New,
		nodeports.Name:                       nodeports.New,
		nodeaffinity.Name:                    nodeaffinity.New,
		podtopologyspread.Name:               podtopologyspread.New,
		nodeunschedulable.Name:               nodeunschedulable.New,
		noderesources.FitName:                runtime.FactoryAdapter(fts, noderesources.NewFit),
		noderesources.BalancedAllocationName: runtime.FactoryAdapter(fts, noderesources.NewBalancedAllocation),
		noderesources.MostAllocatedName:      runtime.FactoryAdapter(fts, noderesources.NewMostAllocated),
		noderesources.LeastAllocatedName:     runtime.FactoryAdapter(fts, noderesources.NewLeastAllocated),
		noderesources.RequestedToCapacityRatioName: runtime.FactoryAdapter(fts, noderesources.NewRequestedToCapacityRatio),
		volumebinding.Name:                   runtime.FactoryAdapter(fts, volumebinding.New),
		volumerestrictions.Name:              runtime.FactoryAdapter(fts, volumerestrictions.New),
		volumezone.Name:                      volumezone.New,
		nodevolumelimits.CSIName:             runtime.FactoryAdapter(fts, nodevolumelimits.NewCSI),
		nodevolumelimits.EBSName:             runtime.FactoryAdapter(fts, nodevolumelimits.NewEBS),
		nodevolumelimits.GCEPDName:           runtime.FactoryAdapter(fts, nodevolumelimits.NewGCEPD),
		nodevolumelimits.AzureDiskName:       runtime.FactoryAdapter(fts, nodevolumelimits.NewAzureDisk),
		nodevolumelimits.CinderName:          runtime.FactoryAdapter(fts, nodevolumelimits.NewCinder),
		interpodaffinity.Name:                runtime.FactoryAdapter(fts, interpodaffinity.New),
		queuesort.Name:                       queuesort.New,
		defaultbinder.Name:                   defaultbinder.New,
		defaultpreemption.Name:               runtime.FactoryAdapter(fts, defaultpreemption.New),
		selectorspread.Name:                  selectorspread.New,
		imagelocality.Name:                   imagelocality.New,
		tainttoleration.Name:                 tainttoleration.New,
		nodename.Name:                        nodename.New,
		nodeports.Name:                       nodeports.New,
		nodeaffinity.Name:                    nodeaffinity.New,
		podtopologyspread.Name:               podtopologyspread.New,
		nodeunschedulable.Name:               nodeunschedulable.New,
		noderesources.FitName:                runtime.FactoryAdapter(fts, noderesources.NewFit),
		noderesources.BalancedAllocationName: runtime.FactoryAdapter(fts, noderesources.NewBalancedAllocation),
		volumebinding.Name:                   runtime.FactoryAdapter(fts, volumebinding.New),
		volumerestrictions.Name:              runtime.FactoryAdapter(fts, volumerestrictions.New),
		volumezone.Name:                      volumezone.New,
		nodevolumelimits.CSIName:             runtime.FactoryAdapter(fts, nodevolumelimits.NewCSI),
		nodevolumelimits.EBSName:             runtime.FactoryAdapter(fts, nodevolumelimits.NewEBS),
		nodevolumelimits.GCEPDName:           runtime.FactoryAdapter(fts, nodevolumelimits.NewGCEPD),
		nodevolumelimits.AzureDiskName:       runtime.FactoryAdapter(fts, nodevolumelimits.NewAzureDisk),
		nodevolumelimits.CinderName:          runtime.FactoryAdapter(fts, nodevolumelimits.NewCinder),
		interpodaffinity.Name:                runtime.FactoryAdapter(fts, interpodaffinity.New),
		queuesort.Name:                       queuesort.New,
		defaultbinder.Name:                   defaultbinder.New,
		defaultpreemption.Name:               runtime.FactoryAdapter(fts, defaultpreemption.New),
	}
}
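
runtime.FactoryAdapter bridges plugins whose constructors also take feature flags (NewFit and NewBalancedAllocation above) into the plain factory signature the registry map expects. A sketch of what the adapter plausibly does — the types and body here are an assumption abbreviated from the scheduler's framework/runtime package, not a verbatim copy:

// Assumed shapes: PluginFactory takes (args, handle); PluginFactoryWithFts
// additionally receives the resolved feature gates. The adapter closes over
// fts so feature-gated constructors can live in the ordinary registry map.
func FactoryAdapter(fts plfeature.Features, withFts PluginFactoryWithFts) PluginFactory {
	return func(plArgs apiruntime.Object, fh framework.Handle) (framework.Plugin, error) {
		return withFts(plArgs, fh, fts)
	}
}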

@ -19,7 +19,7 @@ package testing
import (
	"fmt"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"

@ -192,6 +192,7 @@ rules:
  - k8s.io/kubernetes/pkg/scheduler/internal/parallelize
  - k8s.io/kubernetes/pkg/scheduler/internal/queue
  - k8s.io/kubernetes/pkg/scheduler/listers
  - k8s.io/kubernetes/pkg/scheduler/testing
  - k8s.io/kubernetes/pkg/scheduler/metrics
  - k8s.io/kubernetes/pkg/scheduler/nodeinfo
  - k8s.io/kubernetes/pkg/scheduler/util