feat: cache pod limits as part of metadata in priority functions

commit cd3aac34ea
parent c85c0e4780
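In short: ResourceLimitsPriorityMap used to call getResourceLimits(pod) for every node it scored. With this commit the pod's limits are computed once in PriorityMetadata, stored on the shared priorityMetadata struct, and read back by the priority function, which falls back to recomputing them when no metadata is supplied. The hunks below touch the metadata type and its factory, ResourceLimitsPriorityMap itself, and their tests.

The standalone sketch below illustrates only the caching pattern, not the scheduler's real API: resourceLimits, computeLimits, and scoreNode are invented for the example, and priorityMetadata here is a simplified stand-in for the struct of the same name in the diff.

package main

import "fmt"

// resourceLimits stands in for schedulernodeinfo.Resource in this sketch.
type resourceLimits struct {
    MilliCPU int64
    Memory   int64
}

// priorityMetadata mirrors the idea in the diff: values that priority
// functions need repeatedly are computed once per pod and carried along.
type priorityMetadata struct {
    podLimits *resourceLimits
}

// computeLimits stands in for getResourceLimits, i.e. the work we want to
// avoid repeating for every node.
func computeLimits() *resourceLimits {
    fmt.Println("computing limits (expensive path)")
    return &resourceLimits{MilliCPU: 200, Memory: 2000}
}

// scoreNode shows the consumer side: use the cached value when the metadata
// type assertion succeeds, otherwise fall back to recomputing it.
func scoreNode(meta interface{}) *resourceLimits {
    if pm, ok := meta.(*priorityMetadata); ok && pm != nil {
        return pm.podLimits
    }
    return computeLimits()
}

func main() {
    meta := &priorityMetadata{podLimits: computeLimits()} // computed once
    for i := 0; i < 3; i++ {
        _ = scoreNode(meta) // reuses the cached limits
    }
    _ = scoreNode(nil) // nil metadata takes the fallback path
}

Run as written, the "computing limits (expensive path)" line is printed twice in total, once while building the metadata and once for the nil-metadata fallback, even though scoreNode runs four times.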
@@ -46,6 +46,7 @@ func NewPriorityMetadataFactory(serviceLister algorithm.ServiceLister, controlle
 // priorityMetadata is a type that is passed as metadata for priority functions
 type priorityMetadata struct {
     nonZeroRequest *schedulernodeinfo.Resource
+    podLimits      *schedulernodeinfo.Resource
     podTolerations []v1.Toleration
     affinity       *v1.Affinity
     podSelectors   []labels.Selector
@@ -62,6 +63,7 @@ func (pmf *PriorityMetadataFactory) PriorityMetadata(pod *v1.Pod, nodeNameToInfo
     }
     return &priorityMetadata{
         nonZeroRequest: getNonZeroRequests(pod),
+        podLimits:      getResourceLimits(pod),
         podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
         affinity:       pod.Spec.Affinity,
         podSelectors:   getSelectors(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
@@ -38,6 +38,12 @@ func TestPriorityMetadata(t *testing.T) {
     specifiedReqs.MilliCPU = 200
     specifiedReqs.Memory = 2000

+    nonPodLimits := &schedulernodeinfo.Resource{}
+
+    specifiedPodLimits := &schedulernodeinfo.Resource{}
+    specifiedPodLimits.MilliCPU = 200
+    specifiedPodLimits.Memory = 2000
+
     tolerations := []v1.Toleration{{
         Key:      "foo",
         Operator: v1.TolerationOpEqual,
@@ -104,6 +110,10 @@ func TestPriorityMetadata(t *testing.T) {
                     Image:           "image",
                     ImagePullPolicy: "Always",
                     Resources: v1.ResourceRequirements{
+                        Limits: v1.ResourceList{
+                            v1.ResourceCPU:    resource.MustParse("200m"),
+                            v1.ResourceMemory: resource.MustParse("2000"),
+                        },
                         Requests: v1.ResourceList{
                             v1.ResourceCPU:    resource.MustParse("200m"),
                             v1.ResourceMemory: resource.MustParse("2000"),
@@ -128,6 +138,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithTolerationsAndAffinity,
             expected: &priorityMetadata{
                 nonZeroRequest: nonZeroReqs,
+                podLimits:      nonPodLimits,
                 podTolerations: tolerations,
                 affinity:       podAffinity,
             },
@@ -137,6 +148,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithTolerationsAndRequests,
             expected: &priorityMetadata{
                 nonZeroRequest: specifiedReqs,
+                podLimits:      nonPodLimits,
                 podTolerations: tolerations,
                 affinity:       nil,
             },
@@ -146,6 +158,7 @@ func TestPriorityMetadata(t *testing.T) {
             pod: podWithAffinityAndRequests,
             expected: &priorityMetadata{
                 nonZeroRequest: specifiedReqs,
+                podLimits:      specifiedPodLimits,
                 podTolerations: nil,
                 affinity:       podAffinity,
             },
@@ -42,7 +42,14 @@ func ResourceLimitsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedule
     allocatableResources := nodeInfo.AllocatableResource()

     // compute pod limits
-    podLimits := getResourceLimits(pod)
+    var podLimits *schedulernodeinfo.Resource
+    if priorityMeta, ok := meta.(*priorityMetadata); ok && priorityMeta != nil {
+        // We were able to parse metadata, use podLimits from there.
+        podLimits = priorityMeta.podLimits
+    } else {
+        // We couldn't parse metadata - fallback to computing it.
+        podLimits = getResourceLimits(pod)
+    }

     cpuScore := computeScore(podLimits.MilliCPU, allocatableResources.MilliCPU)
     memScore := computeScore(podLimits.Memory, allocatableResources.Memory)
@@ -83,7 +90,6 @@ func computeScore(limit, allocatable int64) int64 {
 // The reason to create this new function is to be consistent with other
 // priority functions because most or perhaps all priority functions work
 // with schedulernodeinfo.Resource.
-// TODO: cache it as part of metadata passed to priority functions.
 func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
     result := &schedulernodeinfo.Resource{}
     for _, container := range pod.Spec.Containers {
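For context only (this is not part of the diff): the last hunk above cuts getResourceLimits off right after its loop header. Its body, unchanged by this commit, simply folds every container's declared limits into a single schedulernodeinfo.Resource. A rough reconstruction, assuming the Resource.Add(v1.ResourceList) accumulator provided by the nodeinfo package, would look like:

import (
    v1 "k8s.io/api/core/v1"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// Reconstruction for illustration; the real body is not shown in this diff.
// It assumes Resource.Add(v1.ResourceList) folds a ResourceList's CPU, memory
// and other quantities into the running totals.
func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource {
    result := &schedulernodeinfo.Resource{}
    for _, container := range pod.Spec.Containers {
        result.Add(container.Resources.Limits)
    }
    return result
}

Under that reading, a pod that sets only Requests yields an empty Resource, which matches the metadata test above expecting nonPodLimits (a zero-value Resource) for the two pods without Limits and specifiedPodLimits only for the pod whose container declares them.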
@@ -27,7 +27,7 @@ import (
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )

-func TestResourceLimistPriority(t *testing.T) {
+func TestResourceLimitsPriority(t *testing.T) {
     noResources := v1.PodSpec{
         Containers: []v1.Container{},
     }
@@ -140,13 +140,23 @@ func TestResourceLimistPriority(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
-            list, err := priorityFunction(ResourceLimitsPriorityMap, nil, nil)(test.pod, nodeNameToInfo, test.nodes)
+
+            for _, hasMeta := range []bool{true, false} {
+                var metadata *priorityMetadata
+                if hasMeta {
+                    metadata = &priorityMetadata{
+                        podLimits: getResourceLimits(test.pod),
+                    }
+                }
+
+                list, err := priorityFunction(ResourceLimitsPriorityMap, nil, metadata)(test.pod, nodeNameToInfo, test.nodes)
             if err != nil {
                 t.Errorf("unexpected error: %v", err)
             }
             if !reflect.DeepEqual(test.expectedList, list) {
                 t.Errorf("expected %#v, got %#v", test.expectedList, list)
             }
+            }
         })
     }
 }
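One helper the updated test leans on but the diff does not show is priorityFunction. Conceptually it adapts a per-node map function such as ResourceLimitsPriorityMap into something that scores every node in the test, handing the same metadata value, either the constructed *priorityMetadata or nil, to each call; that is what lets the hasMeta loop exercise both branches of the new fallback. A simplified, self-contained sketch of that shape, using invented types (nodeScore, mapFunc) rather than the scheduler's, is:

package main

import "fmt"

// nodeScore is an invented stand-in for the scheduler's per-node score type.
type nodeScore struct {
    Host  string
    Score int
}

// mapFunc mirrors the shape of a map-style priority function: it scores a
// single node and receives the shared metadata on every call.
type mapFunc func(pod string, meta interface{}, node string) (nodeScore, error)

// priorityFunction adapts a mapFunc into a whole-list scorer, threading one
// metadata value through to every node, which is the behaviour the test
// relies on when it passes either a real metadata struct or nil.
func priorityFunction(m mapFunc, meta interface{}) func(pod string, nodes []string) ([]nodeScore, error) {
    return func(pod string, nodes []string) ([]nodeScore, error) {
        result := make([]nodeScore, 0, len(nodes))
        for _, n := range nodes {
            s, err := m(pod, meta, n)
            if err != nil {
                return nil, err
            }
            result = append(result, s)
        }
        return result, nil
    }
}

func main() {
    // A toy map function: the score is just the length of the node name.
    score := func(pod string, meta interface{}, node string) (nodeScore, error) {
        return nodeScore{Host: node, Score: len(node)}, nil
    }
    list, err := priorityFunction(score, nil)("some-pod", []string{"node1", "machine2"})
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println(list) // [{node1 5} {machine2 8}]
}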