pkg/scheduler: drop Resource.ResourceList() method
The method is used only for testing purposes. Since the Resource data type exposes all of its fields, any invoker of ResourceList outside of kubernetes/kubernetes can either copy the original implementation or implement a custom method that converts the resources into the proper Quantity data type.

Given that the hugepage resource is a scalar resource, it is sufficient for the underlying code in fit_test.go to take any extended resource into account. For predicate_test.go, the hugepage resource plays no role, as the General predicates test cases do not set any scalar resource at all.

Additionally, by removing the ResourceList method, pkg/scheduler/framework can drop its dependency on k8s.io/kubernetes/pkg/apis/core/v1/helper.
commit 7286f9712a
parent 6067d8556a
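For out-of-tree callers, here is a minimal sketch of such a standalone replacement (the resourceList name and the example package are illustrative, not part of any API). It mirrors the removed implementation but checks the "hugepages-" prefix directly, so it carries no dependency on the v1helper package:

package example

import (
    "strings"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/kubernetes/pkg/scheduler/framework"
)

// resourceList converts a framework.Resource into a v1.ResourceList,
// following the same conversion rules as the dropped method.
func resourceList(r *framework.Resource) v1.ResourceList {
    result := v1.ResourceList{
        v1.ResourceCPU:              *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
        v1.ResourceMemory:           *resource.NewQuantity(r.Memory, resource.BinarySI),
        v1.ResourcePods:             *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
        v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
    }
    for rName, rQuant := range r.ScalarResources {
        // Hugepage quantities are binary SI; every other scalar resource
        // stays decimal, matching the removed method's behavior.
        if strings.HasPrefix(string(rName), v1.ResourceHugePagesPrefix) {
            result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
        } else {
            result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
        }
    }
    return result
}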
@@ -20,7 +20,7 @@ import (
     "reflect"
     "testing"

-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@@ -144,11 +144,11 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
     }
 }

-func newResourcePod(usage ...schedulerframework.Resource) *v1.Pod {
+func newResourcePod(containerResources ...v1.ResourceList) *v1.Pod {
     containers := []v1.Container{}
-    for _, req := range usage {
+    for _, rl := range containerResources {
         containers = append(containers, v1.Container{
-            Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
+            Resources: v1.ResourceRequirements{Requests: rl},
         })
     }
     return &v1.Pod{
@@ -187,7 +187,10 @@ func TestGeneralPredicates(t *testing.T) {
         {
             pod: &v1.Pod{},
             nodeInfo: schedulerframework.NewNodeInfo(
-                newResourcePod(schedulerframework.Resource{MilliCPU: 9, Memory: 19})),
+                newResourcePod(v1.ResourceList{
+                    v1.ResourceCPU:    *resource.NewMilliQuantity(9, resource.DecimalSI),
+                    v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
+                })),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
                 Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -197,9 +200,15 @@ func TestGeneralPredicates(t *testing.T) {
             name: "no resources/port/host requested always fits",
         },
         {
-            pod: newResourcePod(schedulerframework.Resource{MilliCPU: 8, Memory: 10}),
+            pod: newResourcePod(v1.ResourceList{
+                v1.ResourceCPU:    *resource.NewMilliQuantity(8, resource.DecimalSI),
+                v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
+            }),
             nodeInfo: schedulerframework.NewNodeInfo(
-                newResourcePod(schedulerframework.Resource{MilliCPU: 5, Memory: 19})),
+                newResourcePod(v1.ResourceList{
+                    v1.ResourceCPU:    *resource.NewMilliQuantity(5, resource.DecimalSI),
+                    v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
+                })),
             node: &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
                 Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
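The quantities spelled out in the new v1.ResourceList literals above are exactly what the old helper produced through the removed ResourceList method: NewMilliQuantity with decimal SI for CPU, NewQuantity with binary SI for memory. A quick standalone check of that rendering, assuming only the k8s.io/apimachinery module:

package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
    // CPU milli-values render in decimal SI ("9m"); memory is a plain count.
    cpu := resource.NewMilliQuantity(9, resource.DecimalSI)
    mem := resource.NewQuantity(19, resource.BinarySI)
    fmt.Println(cpu.String(), mem.String()) // prints: 9m 19
}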
@@ -27,7 +27,6 @@ import (
     "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/component-base/featuregate"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
-    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/framework"
@@ -38,7 +37,7 @@ var (
     extendedResourceB     = v1.ResourceName("example.com/bbb")
     kubernetesIOResourceA = v1.ResourceName("kubernetes.io/something")
     kubernetesIOResourceB = v1.ResourceName("subdomain.kubernetes.io/something")
-    hugePageResourceA     = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
+    hugePageResourceA     = v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi")
 )

 func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
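Both spellings of hugePageResourceA name the same resource: the removed v1helper.HugePageResourceName joins v1.ResourceHugePagesPrefix ("hugepages-") with the canonical string of the parsed quantity, so for a 2Mi page size the direct concatenation is equivalent. A small check, assuming only the k8s.io/api module:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

func main() {
    // Prints "hugepages-2Mi", the same name that
    // v1helper.HugePageResourceName(resource.MustParse("2Mi")) produced.
    fmt.Println(v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi"))
}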
@@ -68,8 +67,21 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
 func newResourcePod(usage ...framework.Resource) *v1.Pod {
     var containers []v1.Container
     for _, req := range usage {
+        rl := v1.ResourceList{
+            v1.ResourceCPU:              *resource.NewMilliQuantity(req.MilliCPU, resource.DecimalSI),
+            v1.ResourceMemory:           *resource.NewQuantity(req.Memory, resource.BinarySI),
+            v1.ResourcePods:             *resource.NewQuantity(int64(req.AllowedPodNumber), resource.BinarySI),
+            v1.ResourceEphemeralStorage: *resource.NewQuantity(req.EphemeralStorage, resource.BinarySI),
+        }
+        for rName, rQuant := range req.ScalarResources {
+            if rName == hugePageResourceA {
+                rl[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
+            } else {
+                rl[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
+            }
+        }
         containers = append(containers, v1.Container{
-            Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
+            Resources: v1.ResourceRequirements{Requests: rl},
         })
     }
     return &v1.Pod{
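Note that the inlined loop above narrows the hugepage test from the general v1helper.IsHugePageResourceName predicate to plain equality with hugePageResourceA, which is safe because that is the only hugepage resource these tests declare. A sketch of why the two checks agree for the names the tests actually use (strings.HasPrefix standing in for the helper's prefix check):

package main

import (
    "fmt"
    "strings"

    v1 "k8s.io/api/core/v1"
)

var hugePageResourceA = v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi")

func main() {
    // The helper matched any "hugepages-*" name; the test helper only ever
    // sees hugePageResourceA, so equality yields the same answer here.
    for _, rName := range []v1.ResourceName{hugePageResourceA, "example.com/aaa"} {
        byPrefix := strings.HasPrefix(string(rName), v1.ResourceHugePagesPrefix)
        byEquality := rName == hugePageResourceA
        fmt.Printf("%s: prefix=%v equality=%v\n", rName, byPrefix, byEquality)
    }
}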
@@ -25,15 +25,13 @@ import (
     "sync/atomic"
     "time"

-    "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/api/resource"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     "k8s.io/apimachinery/pkg/util/sets"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/klog/v2"
-    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/features"
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -496,24 +494,6 @@ func (r *Resource) Add(rl v1.ResourceList) {
     }
 }

-// ResourceList returns a resource list of this resource.
-func (r *Resource) ResourceList() v1.ResourceList {
-    result := v1.ResourceList{
-        v1.ResourceCPU:              *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
-        v1.ResourceMemory:           *resource.NewQuantity(r.Memory, resource.BinarySI),
-        v1.ResourcePods:             *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
-        v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
-    }
-    for rName, rQuant := range r.ScalarResources {
-        if v1helper.IsHugePageResourceName(rName) {
-            result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
-        } else {
-            result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
-        }
-    }
-    return result
-}
-
 // Clone returns a copy of this resource.
 func (r *Resource) Clone() *Resource {
     res := &Resource{
@@ -23,7 +23,7 @@ import (
     "testing"

     "github.com/google/go-cmp/cmp"
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
@@ -71,54 +71,6 @@ func TestNewResource(t *testing.T) {
     }
 }

-func TestResourceList(t *testing.T) {
-    tests := []struct {
-        resource *Resource
-        expected v1.ResourceList
-    }{
-        {
-            resource: &Resource{},
-            expected: map[v1.ResourceName]resource.Quantity{
-                v1.ResourceCPU:              *resource.NewScaledQuantity(0, -3),
-                v1.ResourceMemory:           *resource.NewQuantity(0, resource.BinarySI),
-                v1.ResourcePods:             *resource.NewQuantity(0, resource.BinarySI),
-                v1.ResourceEphemeralStorage: *resource.NewQuantity(0, resource.BinarySI),
-            },
-        },
-        {
-            resource: &Resource{
-                MilliCPU:         4,
-                Memory:           2000,
-                EphemeralStorage: 5000,
-                AllowedPodNumber: 80,
-                ScalarResources: map[v1.ResourceName]int64{
-                    "scalar.test/scalar1":        1,
-                    "hugepages-test":             2,
-                    "attachable-volumes-aws-ebs": 39,
-                },
-            },
-            expected: map[v1.ResourceName]resource.Quantity{
-                v1.ResourceCPU:                      *resource.NewScaledQuantity(4, -3),
-                v1.ResourceMemory:                   *resource.NewQuantity(2000, resource.BinarySI),
-                v1.ResourcePods:                     *resource.NewQuantity(80, resource.BinarySI),
-                v1.ResourceEphemeralStorage:         *resource.NewQuantity(5000, resource.BinarySI),
-                "scalar.test/" + "scalar1":          *resource.NewQuantity(1, resource.DecimalSI),
-                "attachable-volumes-aws-ebs":        *resource.NewQuantity(39, resource.DecimalSI),
-                v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(2, resource.BinarySI),
-            },
-        },
-    }
-
-    for i, test := range tests {
-        t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-            rl := test.resource.ResourceList()
-            if !reflect.DeepEqual(test.expected, rl) {
-                t.Errorf("expected: %#v, got: %#v", test.expected, rl)
-            }
-        })
-    }
-}
-
 func TestResourceClone(t *testing.T) {
     tests := []struct {
         resource *Resource