feat: cache pod limits as part of metadata in priority functions

commit cd3aac34ea
parent c85c0e4780
metadata.go

@@ -46,6 +46,7 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle
 // priorityMetadata is a type that is passed as metadata for priority functions
 type priorityMetadata struct {
     nonZeroRequest *schedulernodeinfo.Resource
+    podLimits      *schedulernodeinfo.Resource
     podTolerations []v1.Toleration
     affinity       *v1.Affinity
     podSelectors   []labels.Selector
@@ -62,6 +63,7 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo
     }
     return &priorityMetadata{
         nonZeroRequest: getNonZeroRequests(pod),
+        podLimits:      getResourceLimits(pod),
         podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
         affinity:       pod.Spec.Affinity,
         podSelectors:   getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
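
An aside for readers of this hunk: the new podLimits field is filled by getResourceLimits, whose opening lines appear in a later hunk of this same commit. Below is a minimal sketch of what that helper accumulates, assuming schedulernodeinfo.Resource's Add method (which takes a v1.ResourceList); the name sumContainerLimitsSketch is mine, not the scheduler's.

package priorities

import (
    v1 "k8s.io/api/core/v1"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// sumContainerLimitsSketch mirrors what getResourceLimits computes: the
// per-container resource limits of a pod, summed into a single
// schedulernodeinfo.Resource that priority functions can score against.
func sumContainerLimitsSketch(pod *v1.Pod) *schedulernodeinfo.Resource {
    result := &schedulernodeinfo.Resource{}
    for _, container := range pod.Spec.Containers {
        // Resource.Add accumulates the CPU and memory quantities from a
        // v1.ResourceList into the receiver.
        result.Add(container.Resources.Limits)
    }
    return result
}
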
metadata_test.go

@@ -38,6 +38,12 @@ func TestPriorityMetadata(t *testing.T) {
     specifiedReqs.MilliCPU = 200
     specifiedReqs.Memory = 2000
+
+    nonPodLimits := &schedulernodeinfo.Resource{}
+
+    specifiedPodLimits := &schedulernodeinfo.Resource{}
+    specifiedPodLimits.MilliCPU = 200
+    specifiedPodLimits.Memory = 2000
 
     tolerations := []v1.Toleration{{
         Key:      "foo",
         Operator: v1.TolerationOpEqual,
@@ -104,6 +110,10 @@ func TestPriorityMetadata(t *testing.T) {
             Image:           "image",
             ImagePullPolicy: "Always",
             Resources: v1.ResourceRequirements{
+                Limits: v1.ResourceList{
+                    v1.ResourceCPU:    resource.MustParse("200m"),
+                    v1.ResourceMemory: resource.MustParse("2000"),
+                },
                 Requests: v1.ResourceList{
                     v1.ResourceCPU:    resource.MustParse("200m"),
                     v1.ResourceMemory: resource.MustParse("2000"),
@@ -128,6 +138,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithTolerationsAndAffinity,
             expected: &priorityMetadata{
                 nonZeroRequest: nonZeroReqs,
+                podLimits:      nonPodLimits,
                 podTolerations: tolerations,
                 affinity:       podAffinity,
             },
@@ -137,6 +148,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithTolerationsAndRequests,
             expected: &priorityMetadata{
                 nonZeroRequest: specifiedReqs,
+                podLimits:      nonPodLimits,
                 podTolerations: tolerations,
                 affinity:       nil,
             },
@@ -146,6 +158,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithAffinityAndRequests,
             expected: &priorityMetadata{
                 nonZeroRequest: specifiedReqs,
+                podLimits:      specifiedPodLimits,
                 podTolerations: nil,
                 affinity:       podAffinity,
             },
resource_limits.go

@@ -42,7 +42,14 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
     allocatableResources := nodeInfo.AllocatableResource()
 
     // compute pod limits
-    podLimits := getResourceLimits(pod)
+    var podLimits *schedulernodeinfo.Resource
+    if priorityMeta, ok := meta.(*priorityMetadata); ok && priorityMeta != nil {
+        // We were able to parse metadata, use podLimits from there.
+        podLimits = priorityMeta.podLimits
+    } else {
+        // We couldn't parse metadata - fallback to computing it.
+        podLimits = getResourceLimits(pod)
+    }
 
     cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
     memScore := computeScore(podLimits.Memory, allocatableResources.Memory)
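
The two score lines above call computeScore, the helper named in the next hunk header; this commit does not change it. Its rule, roughly: a node earns one point per resource whose pod limit is non-zero and fits within the node's non-zero allocatable capacity. A minimal sketch of that rule (computeScoreSketch is a name of my choosing, not the scheduler's):

// computeScoreSketch approximates the scoring rule ResourceLimitsPriorityMap
// relies on: 1 when the limit is set and fits within the node's allocatable
// amount, 0 otherwise.
func computeScoreSketch(limit, allocatable int64) int64 {
    if limit != 0 && allocatable != 0 && limit <= allocatable {
        return 1
    }
    return 0
}
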
@@ -83,7 +90,6 @@ func computeScore(limit, allocatable int64) int64 {
 // The reason to create this new function is to be consistent with other
 // priority functions because most or perhaps all priority functions work
 // with schedulernodeinfo.Resource.
-// TODO: cache it as part of metadata passed to priority functions.
 func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
     result := &schedulernodeinfo.Resource{}
     for _, container := range pod.Spec.Containers {
resource_limits_test.go

@@ -27,7 +27,7 @@ import (
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
-func TestResourceLimistPriority(t *testing.T) {
+func TestResourceLimitsPriority(t *testing.T) {
     noResources := v1.PodSpec{
         Containers: []v1.Container{},
     }
@@ -140,13 +140,23 @@ func TestResourceLimistPriority(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
-            list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
-            if err != nil {
-                t.Errorf("unexpected error: %v", err)
-            }
-            if !reflect.DeepEqual(test.expectedList, list) {
-                t.Errorf("expected %#v, got %#v", test.expectedList, list)
-            }
+
+            for _, hasMeta := range []bool{true, false} {
+                var metadata *priorityMetadata
+                if hasMeta {
+                    metadata = &priorityMetadata{
+                        podLimits: getResourceLimits(test.pod),
+                    }
+                }
+
+                list, err := priorityFunction(ResourceLimitsPriorityMap, nil, metadata)(test.pod, nodeNameToInfo, test.nodes)
+                if err != nil {
+                    t.Errorf("unexpected error: %v", err)
+                }
+                if !reflect.DeepEqual(test.expectedList, list) {
+                    t.Errorf("expected %#v, got %#v", test.expectedList, list)
+                }
+            }
         })
     }
 }
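
To run the two touched test functions locally, something like the command below should work; the package path is inferred from the types and imports shown in this diff rather than stated in it:

go test -v -run 'TestPriorityMetadata|TestResourceLimitsPriority' ./pkg/scheduler/algorithm/priorities/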