mirror of https://github.com/k3s-io/kubernetes.git
Removing GetPodPriority from pkg/api and importing PodPriority from k8s.io/component-helpers
parent 95ad020a75
commit 274c536da3
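For out-of-tree callers the change is mechanical: drop the k8s.io/kubernetes/pkg/api/v1/pod dependency and call the helper from k8s.io/component-helpers/scheduling/corev1 that the hunks below switch to. A minimal caller-side sketch (the int32 return type is assumed from the removed implementation, not restated from the helper's own docs):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
)

func main() {
	prio := int32(1000)
	pod := &v1.Pod{Spec: v1.PodSpec{Priority: &prio}}

	// Before: podutil "k8s.io/kubernetes/pkg/api/v1/pod"; podutil.GetPodPriority(pod)
	// After:  same value, new home in component-helpers.
	fmt.Println(corev1helpers.PodPriority(pod)) // 1000
}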
@@ -352,14 +352,3 @@ func UpdatePodCondition(status *v1.PodStatus, condition *v1.PodCondition) bool {
 	// Return true if one of the fields have changed.
 	return !isEqual
 }
-
-// GetPodPriority returns priority of the given pod.
-func GetPodPriority(pod *v1.Pod) int32 {
-	if pod.Spec.Priority != nil {
-		return *pod.Spec.Priority
-	}
-	// When priority of a running pod is nil, it means it was created at a time
-	// that there was no global default priority class and the priority class
-	// name of the pod was empty. So, we resolve to the static default priority.
-	return 0
-}
@@ -830,39 +830,3 @@ func TestUpdatePodCondition(t *testing.T) {
 		assert.Equal(t, test.expected, resultStatus, test.desc)
 	}
 }
-
-// TestGetPodPriority tests GetPodPriority function.
-func TestGetPodPriority(t *testing.T) {
-	p := int32(20)
-	tests := []struct {
-		name             string
-		pod              *v1.Pod
-		expectedPriority int32
-	}{
-		{
-			name: "no priority pod resolves to static default priority",
-			pod: &v1.Pod{
-				Spec: v1.PodSpec{Containers: []v1.Container{
-					{Name: "container", Image: "image"}},
-				},
-			},
-			expectedPriority: 0,
-		},
-		{
-			name: "pod with priority resolves correctly",
-			pod: &v1.Pod{
-				Spec: v1.PodSpec{Containers: []v1.Container{
-					{Name: "container", Image: "image"}},
-					Priority: &p,
-				},
-			},
-			expectedPriority: p,
-		},
-	}
-	for _, test := range tests {
-		if GetPodPriority(test.pod) != test.expectedPriority {
-			t.Errorf("expected pod priority: %v, got %v", test.expectedPriority, GetPodPriority(test.pod))
-		}
-
-	}
-}
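The deleted table test is not re-added anywhere in this diff; a hedged sketch of equivalent coverage written directly against the new helper (the test and package names are illustrative, not taken from component-helpers):

package example

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
)

// TestPodPriorityHelper mirrors the deleted TestGetPodPriority cases against
// corev1helpers.PodPriority (hypothetical test, for illustration only).
func TestPodPriorityHelper(t *testing.T) {
	p := int32(20)
	tests := []struct {
		name             string
		pod              *v1.Pod
		expectedPriority int32
	}{
		{
			name:             "no priority pod resolves to static default priority",
			pod:              &v1.Pod{},
			expectedPriority: 0,
		},
		{
			name:             "pod with priority resolves correctly",
			pod:              &v1.Pod{Spec: v1.PodSpec{Priority: &p}},
			expectedPriority: p,
		},
	}
	for _, test := range tests {
		if got := corev1helpers.PodPriority(test.pod); got != test.expectedPriority {
			t.Errorf("%s: expected pod priority %v, got %v", test.name, test.expectedPriority, got)
		}
	}
}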
@@ -48,7 +48,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/kubelet/eviction",
     deps = [
-        "//pkg/api/v1/pod:go_default_library",
         "//pkg/api/v1/resource:go_default_library",
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/apis/core/v1/helper/qos:go_default_library",
@@ -68,6 +67,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/tools/record:go_default_library",
+        "//staging/src/k8s.io/component-helpers/scheduling/corev1:go_default_library",
         "//staging/src/k8s.io/kubelet/pkg/apis/stats/v1alpha1:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
     ] + select({
@@ -26,9 +26,9 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
 	"k8s.io/klog/v2"
 	statsapi "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
-	"k8s.io/kubernetes/pkg/api/v1/pod"
 	v1resource "k8s.io/kubernetes/pkg/api/v1/resource"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
@@ -528,8 +528,8 @@ func (ms *multiSorter) Less(i, j int) bool {
 
 // priority compares pods by Priority, if priority is enabled.
 func priority(p1, p2 *v1.Pod) int {
-	priority1 := pod.GetPodPriority(p1)
-	priority2 := pod.GetPodPriority(p2)
+	priority1 := corev1helpers.PodPriority(p1)
+	priority2 := corev1helpers.PodPriority(p2)
 	if priority1 == priority2 {
 		return 0
 	}
@@ -9,7 +9,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/core",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/api/v1/pod:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/runtime:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
@@ -22,6 +21,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
+        "//staging/src/k8s.io/component-helpers/scheduling/corev1:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
         "//vendor/k8s.io/utils/trace:go_default_library",
@@ -29,8 +29,8 @@ import (
 	"k8s.io/klog/v2"
 
 	v1 "k8s.io/api/core/v1"
+	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
@@ -389,7 +389,7 @@ func addNominatedPods(ctx context.Context, ph framework.PreemptHandle, pod *v1.P
 	stateOut := state.Clone()
 	podsAdded := false
 	for _, p := range nominatedPods {
-		if podutil.GetPodPriority(p) >= podutil.GetPodPriority(pod) && p.UID != pod.UID {
+		if corev1helpers.PodPriority(p) >= corev1helpers.PodPriority(pod) && p.UID != pod.UID {
 			nodeInfoOut.AddPod(p)
 			status := ph.RunPreFilterExtensionAddPod(ctx, stateOut, pod, p, nodeInfoOut)
 			if !status.IsSuccess() {
@@ -9,7 +9,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpreemption",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/api/v1/pod:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/scheduler/core:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
@@ -26,6 +25,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
+        "//staging/src/k8s.io/component-helpers/scheduling/corev1:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
     ],
@@ -35,8 +35,8 @@ import (
 	"k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	policylisters "k8s.io/client-go/listers/policy/v1beta1"
+	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/core"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -206,9 +206,9 @@ func PodEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister,
 	}
 
 	if nodeInfo, _ := nodeInfos.Get(nomNodeName); nodeInfo != nil {
-		podPriority := podutil.GetPodPriority(pod)
+		podPriority := corev1helpers.PodPriority(pod)
 		for _, p := range nodeInfo.Pods {
-			if p.Pod.DeletionTimestamp != nil && podutil.GetPodPriority(p.Pod) < podPriority {
+			if p.Pod.DeletionTimestamp != nil && corev1helpers.PodPriority(p.Pod) < podPriority {
 				// There is a terminating pod on the nominated node.
 				return false
 			}
@@ -392,7 +392,7 @@ func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) str
 		node := minNodes1[i]
 		victims := nodesToVictims[node]
 		// highestPodPriority is the highest priority among the victims on this node.
-		highestPodPriority := podutil.GetPodPriority(victims.Pods[0])
+		highestPodPriority := corev1helpers.PodPriority(victims.Pods[0])
 		if highestPodPriority < minHighestPriority {
 			minHighestPriority = highestPodPriority
 			lenNodes2 = 0
@@ -418,7 +418,7 @@ func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) str
 			// needed so that a node with a few pods with negative priority is not
 			// picked over a node with a smaller number of pods with the same negative
 			// priority (and similar scenarios).
-			sumPriorities += int64(podutil.GetPodPriority(pod)) + int64(math.MaxInt32+1)
+			sumPriorities += int64(corev1helpers.PodPriority(pod)) + int64(math.MaxInt32+1)
 		}
 		if sumPriorities < minSumPriorities {
 			minSumPriorities = sumPriorities
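The int64(math.MaxInt32+1) offset in the hunk above shifts every victim priority into the non-negative range before summing; without it, a node with more negative-priority victims would have the smaller (more negative) sum and win the "minimum sum of priorities" comparison. A quick worked check of that reasoning (standalone sketch, not code from this file):

package main

import (
	"fmt"
	"math"
)

func main() {
	offset := int64(math.MaxInt32 + 1)
	prio := int64(-10)

	// One victim at priority -10 versus three victims at priority -10.
	rawOne, rawThree := 1*prio, 3*prio
	shiftedOne, shiftedThree := 1*(prio+offset), 3*(prio+offset)

	fmt.Println(rawOne > rawThree)         // true: raw sums would prefer the three-victim node
	fmt.Println(shiftedOne < shiftedThree) // true: shifted sums prefer the one-victim node, as intended
}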
@@ -525,9 +525,9 @@ func selectVictimsOnNode(
 	}
 	// As the first step, remove all the lower priority pods from the node and
 	// check if the given pod can be scheduled.
-	podPriority := podutil.GetPodPriority(pod)
+	podPriority := corev1helpers.PodPriority(pod)
 	for _, p := range nodeInfo.Pods {
-		if podutil.GetPodPriority(p.Pod) < podPriority {
+		if corev1helpers.PodPriority(p.Pod) < podPriority {
 			potentialVictims = append(potentialVictims, p.Pod)
 			if err := removePod(p.Pod); err != nil {
 				return nil, 0, false
@@ -639,9 +639,9 @@ func getLowerPriorityNominatedPods(pn framework.PodNominator, pod *v1.Pod, nodeN
 	}
 
 	var lowerPriorityPods []*v1.Pod
-	podPriority := podutil.GetPodPriority(pod)
+	podPriority := corev1helpers.PodPriority(pod)
 	for _, p := range pods {
-		if podutil.GetPodPriority(p) < podPriority {
+		if corev1helpers.PodPriority(p) < podPriority {
 			lowerPriorityPods = append(lowerPriorityPods, p)
 		}
 	}
@@ -6,9 +6,9 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/api/v1/pod:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+        "//staging/src/k8s.io/component-helpers/scheduling/corev1:go_default_library",
     ],
 )
 
@@ -18,7 +18,7 @@ package queuesort
 
 import (
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/kubernetes/pkg/api/v1/pod"
+	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
 
@@ -39,8 +39,8 @@ func (pl *PrioritySort) Name() string {
 // It sorts pods based on their priority. When priorities are equal, it uses
 // PodQueueInfo.timestamp.
 func (pl *PrioritySort) Less(pInfo1, pInfo2 *framework.QueuedPodInfo) bool {
-	p1 := pod.GetPodPriority(pInfo1.Pod)
-	p2 := pod.GetPodPriority(pInfo2.Pod)
+	p1 := corev1helpers.PodPriority(pInfo1.Pod)
+	p2 := corev1helpers.PodPriority(pInfo2.Pod)
 	return (p1 > p2) || (p1 == p2 && pInfo1.Timestamp.Before(pInfo2.Timestamp))
 }
 
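The Less rule above is strict priority ordering with FIFO tie-breaking on the enqueue timestamp; a small standalone sketch of the same rule applied to bare pods (the framework.QueuedPodInfo wrapper is left out for brevity):

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
)

// less mirrors PrioritySort.Less: higher priority first, earlier timestamp breaks ties.
func less(p1, p2 *v1.Pod, t1, t2 time.Time) bool {
	pr1, pr2 := corev1helpers.PodPriority(p1), corev1helpers.PodPriority(p2)
	return (pr1 > pr2) || (pr1 == pr2 && t1.Before(t2))
}

func main() {
	hi, lo := int32(100), int32(10)
	urgent := &v1.Pod{Spec: v1.PodSpec{Priority: &hi}}
	batch := &v1.Pod{Spec: v1.PodSpec{Priority: &lo}}

	now := time.Now()
	fmt.Println(less(urgent, batch, now, now))                 // true: higher priority dequeues first
	fmt.Println(less(batch, batch, now, now.Add(time.Second))) // true: same priority, earlier timestamp wins
}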
@@ -13,7 +13,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/testing",
     deps = [
-        "//pkg/api/v1/pod:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/runtime:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
@@ -23,6 +22,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/component-helpers/scheduling/corev1:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
     ],
 )
@@ -23,8 +23,8 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/scheduler/util"
@@ -235,9 +235,9 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node)
 	}
 	// As the first step, remove all the lower priority pods from the node and
 	// check if the given pod can be scheduled.
-	podPriority := podutil.GetPodPriority(pod)
+	podPriority := corev1helpers.PodPriority(pod)
 	for _, p := range nodeInfoCopy.Pods {
-		if podutil.GetPodPriority(p.Pod) < podPriority {
+		if corev1helpers.PodPriority(p.Pod) < podPriority {
 			potentialVictims = append(potentialVictims, p.Pod)
 			removePod(p.Pod)
 		}
@@ -39,7 +39,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/util",
     deps = [
-        "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -51,6 +50,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/strategicpatch:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/component-helpers/scheduling/corev1:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
         "//vendor/k8s.io/klog/v2:go_default_library",
     ],
@@ -28,9 +28,9 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/client-go/kubernetes"
+	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
 	"k8s.io/klog/v2"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 )
 
 // GetPodFullName returns a name that uniquely identifies a pod.
@@ -60,15 +60,15 @@ func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time {
 	}
 
 	earliestPodStartTime := GetPodStartTime(victims.Pods[0])
-	maxPriority := podutil.GetPodPriority(victims.Pods[0])
+	maxPriority := corev1helpers.PodPriority(victims.Pods[0])
 
 	for _, pod := range victims.Pods {
-		if podutil.GetPodPriority(pod) == maxPriority {
+		if corev1helpers.PodPriority(pod) == maxPriority {
 			if GetPodStartTime(pod).Before(earliestPodStartTime) {
 				earliestPodStartTime = GetPodStartTime(pod)
 			}
-		} else if podutil.GetPodPriority(pod) > maxPriority {
-			maxPriority = podutil.GetPodPriority(pod)
+		} else if corev1helpers.PodPriority(pod) > maxPriority {
+			maxPriority = corev1helpers.PodPriority(pod)
 			earliestPodStartTime = GetPodStartTime(pod)
 		}
 	}
@@ -81,8 +81,8 @@ func GetEarliestPodStartTime(victims *extenderv1.Victims) *metav1.Time {
 // It takes arguments of the type "interface{}" to be used with SortableList,
 // but expects those arguments to be *v1.Pod.
 func MoreImportantPod(pod1, pod2 *v1.Pod) bool {
-	p1 := podutil.GetPodPriority(pod1)
-	p2 := podutil.GetPodPriority(pod2)
+	p1 := corev1helpers.PodPriority(pod1)
+	p2 := corev1helpers.PodPriority(pod2)
 	if p1 != p2 {
 		return p1 > p2
 	}