Merge pull request #101465 from ingvagabund/scheduler-drop-Resource-ResourceList-method
pkg/scheduler: drop Resource.ResourceList() method
commit b9e86716b9
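
This PR deletes the scheduler framework's Resource.ResourceList() helper and inlines the equivalent v1.ResourceList construction at every remaining call site, as the hunks below show. A minimal, self-contained sketch of the replacement pattern (the helper name requestsFor is ours, not the PR's), using the quantity formats the deleted method used: CPU as a DecimalSI milli-quantity; memory and hugepages as BinarySI; other scalar resources as DecimalSI:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// hugePage2Mi matches the hugePageResourceA fixture in the diff: "hugepages-2Mi".
var hugePage2Mi = v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi")

// requestsFor is a hypothetical stand-in for the removed Resource.ResourceList():
// it builds the v1.ResourceList inline, exactly as the updated tests now do.
func requestsFor(milliCPU, memory int64, scalars map[v1.ResourceName]int64) v1.ResourceList {
	rl := v1.ResourceList{
		v1.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
		v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
	}
	for name, quantity := range scalars {
		if name == hugePage2Mi {
			// Hugepage quantities are binary, like memory.
			rl[name] = *resource.NewQuantity(quantity, resource.BinarySI)
		} else {
			rl[name] = *resource.NewQuantity(quantity, resource.DecimalSI)
		}
	}
	return rl
}

func main() {
	// The same values the updated TestGeneralPredicates case passes inline.
	rl := requestsFor(9, 19, nil)
	fmt.Printf("cpu=%v memory=%v\n", rl.Cpu(), rl.Memory())
}

Dropping the method leaves the conversion one-directional: a v1.ResourceList can still be folded into a framework Resource via Add(rl v1.ResourceList), but callers that need a ResourceList must now spell out the quantities and their formats themselves.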
@@ -20,7 +20,7 @@ import (
 	"reflect"
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@@ -144,11 +144,11 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA
 	}
 }
 
-func newResourcePod(usage ...schedulerframework.Resource) *v1.Pod {
+func newResourcePod(containerResources ...v1.ResourceList) *v1.Pod {
 	containers := []v1.Container{}
-	for _, req := range usage {
+	for _, rl := range containerResources {
 		containers = append(containers, v1.Container{
-			Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
+			Resources: v1.ResourceRequirements{Requests: rl},
 		})
 	}
 	return &v1.Pod{
@@ -187,7 +187,10 @@ func TestGeneralPredicates(t *testing.T) {
 		{
 			pod: &v1.Pod{},
 			nodeInfo: schedulerframework.NewNodeInfo(
-				newResourcePod(schedulerframework.Resource{MilliCPU: 9, Memory: 19})),
+				newResourcePod(v1.ResourceList{
+					v1.ResourceCPU:    *resource.NewMilliQuantity(9, resource.DecimalSI),
+					v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
+				})),
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -197,9 +200,15 @@ func TestGeneralPredicates(t *testing.T) {
 			name: "no resources/port/host requested always fits",
 		},
 		{
-			pod: newResourcePod(schedulerframework.Resource{MilliCPU: 8, Memory: 10}),
+			pod: newResourcePod(v1.ResourceList{
+				v1.ResourceCPU:    *resource.NewMilliQuantity(8, resource.DecimalSI),
+				v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
+			}),
 			nodeInfo: schedulerframework.NewNodeInfo(
-				newResourcePod(schedulerframework.Resource{MilliCPU: 5, Memory: 19})),
+				newResourcePod(v1.ResourceList{
+					v1.ResourceCPU:    *resource.NewMilliQuantity(5, resource.DecimalSI),
+					v1.ResourceMemory: *resource.NewQuantity(19, resource.BinarySI),
+				})),
 			node: &v1.Node{
 				ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
 				Status:     v1.NodeStatus{Capacity: makeResources(10, 20, 32, 0, 0, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 0, 0, 0)},
@@ -27,7 +27,6 @@ import (
 	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -38,7 +37,7 @@ var (
 	extendedResourceB     = v1.ResourceName("example.com/bbb")
 	kubernetesIOResourceA = v1.ResourceName("kubernetes.io/something")
 	kubernetesIOResourceB = v1.ResourceName("subdomain.kubernetes.io/something")
-	hugePageResourceA     = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
+	hugePageResourceA     = v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi")
 )
 
 func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
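
The hugePageResourceA change above is the one spot in this file where the spelling of a resource name changes rather than just its construction: the PR swaps the v1helper constructor for direct prefix concatenation. A quick standalone check (ours, not part of the PR) that both spellings name the same resource:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
)

func main() {
	// The form the test fixture used before this PR.
	viaHelper := v1helper.HugePageResourceName(resource.MustParse("2Mi"))
	// The form it uses after, with no v1helper dependency.
	viaPrefix := v1.ResourceName(v1.ResourceHugePagesPrefix + "2Mi")
	fmt.Println(viaHelper == viaPrefix, viaPrefix) // true hugepages-2Mi
}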
@@ -68,8 +67,21 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA
 func newResourcePod(usage ...framework.Resource) *v1.Pod {
 	var containers []v1.Container
 	for _, req := range usage {
+		rl := v1.ResourceList{
+			v1.ResourceCPU:              *resource.NewMilliQuantity(req.MilliCPU, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(req.Memory, resource.BinarySI),
+			v1.ResourcePods:             *resource.NewQuantity(int64(req.AllowedPodNumber), resource.BinarySI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(req.EphemeralStorage, resource.BinarySI),
+		}
+		for rName, rQuant := range req.ScalarResources {
+			if rName == hugePageResourceA {
+				rl[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
+			} else {
+				rl[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
+			}
+		}
 		containers = append(containers, v1.Container{
-			Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
+			Resources: v1.ResourceRequirements{Requests: rl},
 		})
 	}
 	return &v1.Pod{
@@ -25,15 +25,13 @@ import (
 	"sync/atomic"
 	"time"
 
-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
-	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -496,24 +494,6 @@ func (r *Resource) Add(rl v1.ResourceList) {
 	}
 }
 
-// ResourceList returns a resource list of this resource.
-func (r *Resource) ResourceList() v1.ResourceList {
-	result := v1.ResourceList{
-		v1.ResourceCPU:              *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
-		v1.ResourceMemory:           *resource.NewQuantity(r.Memory, resource.BinarySI),
-		v1.ResourcePods:             *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
-		v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
-	}
-	for rName, rQuant := range r.ScalarResources {
-		if v1helper.IsHugePageResourceName(rName) {
-			result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
-		} else {
-			result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
-		}
-	}
-	return result
-}
-
 // Clone returns a copy of this resource.
 func (r *Resource) Clone() *Resource {
 	res := &Resource{
@@ -23,7 +23,7 @@ import (
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -71,54 +71,6 @@ func TestNewResource(t *testing.T) {
 	}
 }
 
-func TestResourceList(t *testing.T) {
-	tests := []struct {
-		resource *Resource
-		expected v1.ResourceList
-	}{
-		{
-			resource: &Resource{},
-			expected: map[v1.ResourceName]resource.Quantity{
-				v1.ResourceCPU:              *resource.NewScaledQuantity(0, -3),
-				v1.ResourceMemory:           *resource.NewQuantity(0, resource.BinarySI),
-				v1.ResourcePods:             *resource.NewQuantity(0, resource.BinarySI),
-				v1.ResourceEphemeralStorage: *resource.NewQuantity(0, resource.BinarySI),
-			},
-		},
-		{
-			resource: &Resource{
-				MilliCPU:         4,
-				Memory:           2000,
-				EphemeralStorage: 5000,
-				AllowedPodNumber: 80,
-				ScalarResources: map[v1.ResourceName]int64{
-					"scalar.test/scalar1":        1,
-					"hugepages-test":             2,
-					"attachable-volumes-aws-ebs": 39,
-				},
-			},
-			expected: map[v1.ResourceName]resource.Quantity{
-				v1.ResourceCPU:                      *resource.NewScaledQuantity(4, -3),
-				v1.ResourceMemory:                   *resource.NewQuantity(2000, resource.BinarySI),
-				v1.ResourcePods:                     *resource.NewQuantity(80, resource.BinarySI),
-				v1.ResourceEphemeralStorage:         *resource.NewQuantity(5000, resource.BinarySI),
-				"scalar.test/" + "scalar1":          *resource.NewQuantity(1, resource.DecimalSI),
-				"attachable-volumes-aws-ebs":        *resource.NewQuantity(39, resource.DecimalSI),
-				v1.ResourceHugePagesPrefix + "test": *resource.NewQuantity(2, resource.BinarySI),
-			},
-		},
-	}
-
-	for i, test := range tests {
-		t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) {
-			rl := test.resource.ResourceList()
-			if !reflect.DeepEqual(test.expected, rl) {
-				t.Errorf("expected: %#v, got: %#v", test.expected, rl)
-			}
-		})
-	}
-}
-
 func TestResourceClone(t *testing.T) {
 	tests := []struct {
 		resource *Resource