Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00

commit 942b526e5c

Merge pull request #87051 from Huang-Wei/remove-prio-util-pkg

Remove scheduler/algorithm/priorities/util package
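Every hunk below is part of one mechanical move: the helpers in pkg/scheduler/algorithm/priorities/util (GetNonzeroRequests, GetNonzeroRequestForResource, GetNamespacesFromPodAffinityTerm, PodMatchesTermsNamespaceAndSelector, NodesHaveSameTopologyKey, and the DefaultMilliCPURequest / DefaultMemoryRequest constants) now live in pkg/scheduler/util, and call sites swap the priorityutil alias for schedutil. A minimal caller-side sketch of the migration follows; the wrapper function is illustrative and not taken from the diff, and only the import path and the schedutil calls mirror the hunks below.

package example

import (
	v1 "k8s.io/api/core/v1"

	// Before this PR the same helpers were imported as:
	//   priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

// nonZeroRequests sums per-container requests; GetNonzeroRequests substitutes
// the DefaultMilliCPURequest / DefaultMemoryRequest constants for containers
// that declare no requests. (Hypothetical wrapper, for illustration only.)
func nonZeroRequests(pod *v1.Pod) (milliCPU, memory int64) {
	for i := range pod.Spec.Containers {
		cpu, mem := schedutil.GetNonzeroRequests(&pod.Spec.Containers[i].Resources.Requests)
		milliCPU += cpu
		memory += mem
	}
	return milliCPU, memory
}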
@@ -27,7 +27,6 @@ filegroup(
     srcs = [
         ":package-srcs",
         "//pkg/scheduler/algorithm/predicates:all-srcs",
-        "//pkg/scheduler/algorithm/priorities:all-srcs",
     ],
     tags = ["automanaged"],
     visibility = ["//visibility:public"],
@@ -1,17 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [
-        ":package-srcs",
-        "//pkg/scheduler/algorithm/priorities/util:all-srcs",
-    ],
-    tags = ["automanaged"],
-)
@@ -1,53 +0,0 @@
-package(default_visibility = ["//visibility:public"])
-
-load(
-    "@io_bazel_rules_go//go:def.bzl",
-    "go_library",
-    "go_test",
-)
-
-go_test(
-    name = "go_default_test",
-    srcs = [
-        "non_zero_test.go",
-        "topologies_test.go",
-    ],
-    embed = [":go_default_library"],
-    deps = [
-        "//staging/src/k8s.io/api/core/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-        "//vendor/github.com/stretchr/testify/assert:go_default_library",
-    ],
-)
-
-go_library(
-    name = "go_default_library",
-    srcs = [
-        "non_zero.go",
-        "topologies.go",
-    ],
-    importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util",
-    deps = [
-        "//pkg/apis/core/v1/helper:go_default_library",
-        "//staging/src/k8s.io/api/core/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-    ],
-)
-
-filegroup(
-    name = "package-srcs",
-    srcs = glob(["**"]),
-    tags = ["automanaged"],
-    visibility = ["//visibility:private"],
-)
-
-filegroup(
-    name = "all-srcs",
-    srcs = [":package-srcs"],
-    tags = ["automanaged"],
-)
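The two libraries removed here, non_zero.go and topologies.go, together with their tests, are re-added under pkg/scheduler/util in the go_test and go_library BUILD hunks near the end of this diff.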
@@ -48,7 +48,6 @@ go_test(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
-        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/apis/extender/v1:go_default_library",
         "//pkg/scheduler/framework/plugins/defaultpodtopologyspread:go_default_library",
@@ -38,7 +38,6 @@ import (
 	clientsetfake "k8s.io/client-go/kubernetes/fake"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	algorithmpredicates "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	extenderv1 "k8s.io/kubernetes/pkg/scheduler/apis/extender/v1"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultpodtopologyspread"
@@ -59,6 +58,7 @@ import (
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
+	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

 var (
@@ -1024,9 +1024,9 @@ func TestZeroRequest(t *testing.T) {
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
 					v1.ResourceCPU: resource.MustParse(
-						strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
+						strconv.FormatInt(schedutil.DefaultMilliCPURequest, 10) + "m"),
 					v1.ResourceMemory: resource.MustParse(
-						strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
+						strconv.FormatInt(schedutil.DefaultMemoryRequest, 10)),
 				},
 			},
 		},
@@ -1041,9 +1041,9 @@ func TestZeroRequest(t *testing.T) {
 			Resources: v1.ResourceRequirements{
 				Requests: v1.ResourceList{
 					v1.ResourceCPU: resource.MustParse(
-						strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
+						strconv.FormatInt(schedutil.DefaultMilliCPURequest*3, 10) + "m"),
 					v1.ResourceMemory: resource.MustParse(
-						strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
+						strconv.FormatInt(schedutil.DefaultMemoryRequest*3, 10)),
 				},
 			},
 		},
@@ -1065,7 +1065,7 @@ func TestZeroRequest(t *testing.T) {
 		// and when the zero-request pod is the one being scheduled.
 		{
 			pod:   &v1.Pod{Spec: noResources},
-			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
 			name:  "test priority of zero-request pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -1075,7 +1075,7 @@ func TestZeroRequest(t *testing.T) {
 		},
 		{
 			pod:   &v1.Pod{Spec: small},
-			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
 			name:  "test priority of nonzero-request pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -1086,7 +1086,7 @@ func TestZeroRequest(t *testing.T) {
 		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
 		{
 			pod:   &v1.Pod{Spec: large},
-			nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
+			nodes: []*v1.Node{makeNode("machine1", 1000, schedutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, schedutil.DefaultMemoryRequest*10)},
 			name:  "test priority of larger pod with machine with zero-request pod",
 			pods: []*v1.Pod{
 				{Spec: large1}, {Spec: noResources1},
@@ -1215,9 +1215,9 @@ var smallContainers = []v1.Container{
 		Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
 				"cpu": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMilliCPURequest, 10) + "m"),
+					strconv.FormatInt(schedutil.DefaultMilliCPURequest, 10) + "m"),
 				"memory": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMemoryRequest, 10)),
+					strconv.FormatInt(schedutil.DefaultMemoryRequest, 10)),
 			},
 		},
 	},
@@ -1227,9 +1227,9 @@ var mediumContainers = []v1.Container{
 		Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
 				"cpu": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMilliCPURequest*2, 10) + "m"),
+					strconv.FormatInt(schedutil.DefaultMilliCPURequest*2, 10) + "m"),
 				"memory": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMemoryRequest*2, 10)),
+					strconv.FormatInt(schedutil.DefaultMemoryRequest*2, 10)),
 			},
 		},
 	},
@@ -1239,9 +1239,9 @@ var largeContainers = []v1.Container{
 		Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
 				"cpu": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMilliCPURequest*3, 10) + "m"),
+					strconv.FormatInt(schedutil.DefaultMilliCPURequest*3, 10) + "m"),
 				"memory": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMemoryRequest*3, 10)),
+					strconv.FormatInt(schedutil.DefaultMemoryRequest*3, 10)),
 			},
 		},
 	},
@@ -1251,9 +1251,9 @@ var veryLargeContainers = []v1.Container{
 		Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
 				"cpu": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMilliCPURequest*5, 10) + "m"),
+					strconv.FormatInt(schedutil.DefaultMilliCPURequest*5, 10) + "m"),
 				"memory": resource.MustParse(
-					strconv.FormatInt(priorityutil.DefaultMemoryRequest*5, 10)),
+					strconv.FormatInt(schedutil.DefaultMemoryRequest*5, 10)),
 			},
 		},
 	},
@@ -1542,7 +1542,7 @@ func TestSelectNodesForPreemption(t *testing.T) {

 			var nodes []*v1.Node
 			for _, n := range test.nodes {
-				node := makeNode(n, 1000*5, priorityutil.DefaultMemoryRequest*5)
+				node := makeNode(n, 1000*5, schedutil.DefaultMemoryRequest*5)
 				// if possible, split node name by '/' to form labels in a format of
 				// {"hostname": node.Name[0], "zone": node.Name[1], "region": node.Name[2]}
 				node.ObjectMeta.Labels = make(map[string]string)
@@ -1815,7 +1815,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			var nodes []*v1.Node
 			for _, n := range test.nodes {
-				nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
+				nodes = append(nodes, makeNode(n, schedutil.DefaultMilliCPURequest*5, schedutil.DefaultMemoryRequest*5))
 			}
 			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			registry := framework.Registry{}
@@ -2276,7 +2276,7 @@ func TestPreempt(t *testing.T) {
 			}
 			var nodes []*v1.Node
 			for i, name := range nodeNames {
-				node := makeNode(name, 1000*5, priorityutil.DefaultMemoryRequest*5)
+				node := makeNode(name, 1000*5, schedutil.DefaultMemoryRequest*5)
 				// if possible, split node name by '/' to form labels in a format of
 				// {"hostname": node.Name[0], "zone": node.Name[1], "region": node.Name[2]}
 				node.ObjectMeta.Labels = make(map[string]string)
@@ -10,7 +10,6 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
@@ -27,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog"
-	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@@ -154,7 +153,7 @@ func (m topologyToMatchedTermCount) updateWithAffinityTerms(targetPod *v1.Pod, t
 func (m topologyToMatchedTermCount) updateWithAntiAffinityTerms(targetPod *v1.Pod, targetPodNode *v1.Node, antiAffinityTerms []*affinityTerm, value int64) {
 	// Check anti-affinity terms.
 	for _, a := range antiAffinityTerms {
-		if priorityutil.PodMatchesTermsNamespaceAndSelector(targetPod, a.namespaces, a.selector) {
+		if schedutil.PodMatchesTermsNamespaceAndSelector(targetPod, a.namespaces, a.selector) {
 			if topologyValue, ok := targetPodNode.Labels[a.topologyKey]; ok {
 				pair := topologyPair{key: a.topologyKey, value: topologyValue}
 				m[pair] += value
@@ -184,7 +183,7 @@ func getAffinityTerms(pod *v1.Pod, v1Terms []v1.PodAffinityTerm) ([]*affinityTer

 	var terms []*affinityTerm
 	for _, term := range v1Terms {
-		namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, &term)
+		namespaces := schedutil.GetNamespacesFromPodAffinityTerm(pod, &term)
 		selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 		if err != nil {
 			return nil, err
@@ -200,7 +199,7 @@ func podMatchesAllAffinityTerms(pod *v1.Pod, terms []*affinityTerm) bool {
 		return false
 	}
 	for _, term := range terms {
-		if !priorityutil.PodMatchesTermsNamespaceAndSelector(pod, term.namespaces, term.selector) {
+		if !schedutil.PodMatchesTermsNamespaceAndSelector(pod, term.namespaces, term.selector) {
 			return false
 		}
 	}
@@ -487,7 +486,7 @@ func (pl *InterPodAffinity) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod, n
 		if len(term.TopologyKey) == 0 {
 			return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
 		}
-		if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo.Node(), term.TopologyKey) {
+		if !schedutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo.Node(), term.TopologyKey) {
 			return false, true, nil
 		}
 	}
@@ -509,8 +508,8 @@ func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.P
 		if err != nil {
 			return nil, err
 		}
-		namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
-		if priorityutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
+		namespaces := schedutil.GetNamespacesFromPodAffinityTerm(existingPod, &term)
+		if schedutil.PodMatchesTermsNamespaceAndSelector(newPod, namespaces, selector) {
 			if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
 				pair := topologyPair{key: term.TopologyKey, value: topologyValue}
 				topologyMap[pair]++
|
@ -24,7 +24,6 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog"
|
||||
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
|
||||
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
|
||||
@@ -53,7 +52,7 @@ type weightedAffinityTerm struct {
 }

 func newWeightedAffinityTerm(pod *v1.Pod, term *v1.PodAffinityTerm, weight int32) (*weightedAffinityTerm, error) {
-	namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, term)
+	namespaces := schedutil.GetNamespacesFromPodAffinityTerm(pod, term)
 	selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 	if err != nil {
 		return nil, err
@@ -88,7 +87,7 @@ func (pl *InterPodAffinity) processTerm(
 		return
 	}

-	match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, term.namespaces, term.selector)
+	match := schedutil.PodMatchesTermsNamespaceAndSelector(podToCheck, term.namespaces, term.selector)
 	tpValue, tpValueExist := fixedNode.Labels[term.topologyKey]
 	if match && tpValueExist {
 		pl.Lock()
@@ -18,11 +18,11 @@ go_library(
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
-        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/plugins/migration:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -22,9 +22,9 @@ import (
 	"k8s.io/klog"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
-	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

 // resourceToWeightMap contains resource name and weight.
@@ -121,7 +121,7 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 	var podRequest int64
 	for i := range pod.Spec.Containers {
 		container := &pod.Spec.Containers[i]
-		value := priorityutil.GetNonzeroRequestForResource(resource, &container.Resources.Requests)
+		value := schedutil.GetNonzeroRequestForResource(resource, &container.Resources.Requests)
 		podRequest += value
 	}

pkg/scheduler/internal/cache/BUILD (vendored, 2 changes)
@@ -34,9 +34,9 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/features:go_default_library",
-        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+        "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
pkg/scheduler/internal/cache/cache_test.go (vendored, 10 changes)
@@ -31,9 +31,9 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/features"
-	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

 func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulernodeinfo.NodeInfo) error {
@@ -152,8 +152,8 @@ func TestAssumePodScheduled(t *testing.T) {
 			Memory:   0,
 		},
 		&schedulernodeinfo.Resource{
-			MilliCPU: priorityutil.DefaultMilliCPURequest,
-			Memory:   priorityutil.DefaultMemoryRequest,
+			MilliCPU: schedutil.DefaultMilliCPURequest,
+			Memory:   schedutil.DefaultMemoryRequest,
 		},
 		[]*v1.Pod{testPods[3]},
 		newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
@@ -779,8 +779,8 @@ func TestEphemeralStorageResource(t *testing.T) {
 			EphemeralStorage: 500,
 		},
 		&schedulernodeinfo.Resource{
-			MilliCPU: priorityutil.DefaultMilliCPURequest,
-			Memory:   priorityutil.DefaultMemoryRequest,
+			MilliCPU: schedutil.DefaultMilliCPURequest,
+			Memory:   schedutil.DefaultMemoryRequest,
 		},
 		[]*v1.Pod{podE},
 		schedulernodeinfo.HostPortInfo{},
@ -11,7 +11,6 @@ go_library(
|
||||
visibility = ["//pkg/scheduler:__subpackages__"],
|
||||
deps = [
|
||||
"//pkg/api/v1/pod:go_default_library",
|
||||
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
|
||||
"//pkg/scheduler/framework/v1alpha1:go_default_library",
|
||||
"//pkg/scheduler/internal/heap:go_default_library",
|
||||
"//pkg/scheduler/metrics:go_default_library",
|
||||
|
@@ -37,7 +37,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/api/v1/pod"
-	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/scheduler/internal/heap"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
@@ -588,12 +587,12 @@ func (p *PriorityQueue) getUnschedulablePodsWithMatchingAffinityTerm(pod *v1.Pod
 		if affinity != nil && affinity.PodAffinity != nil {
 			terms := util.GetPodAffinityTerms(affinity.PodAffinity)
 			for _, term := range terms {
-				namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(up, &term)
+				namespaces := util.GetNamespacesFromPodAffinityTerm(up, &term)
 				selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
 				if err != nil {
 					klog.Errorf("Error getting label selectors for pod: %v.", up.Name)
 				}
-				if priorityutil.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
+				if util.PodMatchesTermsNamespaceAndSelector(pod, namespaces, selector) {
 					podsToMove = append(podsToMove, pInfo)
 					break
 				}
@@ -11,7 +11,7 @@ go_library(
     deps = [
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
-        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
+        "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
@@ -28,7 +28,7 @@ import (
 	"k8s.io/klog"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
-	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
+	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

 var (
@@ -570,7 +570,7 @@ func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64)
 	for _, c := range pod.Spec.Containers {
 		resPtr.Add(c.Resources.Requests)

-		non0CPUReq, non0MemReq := priorityutil.GetNonzeroRequests(&c.Resources.Requests)
+		non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests)
 		non0CPU += non0CPUReq
 		non0Mem += non0MemReq
 		// No non-zero resources for GPUs or opaque resources.
@@ -10,14 +10,21 @@ go_test(
     name = "go_default_test",
     srcs = [
         "error_channel_test.go",
+        "non_zero_test.go",
+        "topologies_test.go",
         "utils_test.go",
     ],
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/apis/extender/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/selection:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
 )

@@ -26,14 +33,19 @@ go_library(
     srcs = [
         "clock.go",
         "error_channel.go",
+        "non_zero.go",
+        "topologies.go",
         "utils.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/util",
     deps = [
         "//pkg/api/v1/pod:go_default_library",
+        "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/scheduler/apis/extender/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
 )
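As a usage reference for the relocated topologies.go helpers, here is a sketch of the pattern the inter-pod-affinity hunks above follow; the wrapper is illustrative, and only the two schedutil calls and metav1.LabelSelectorAsSelector mirror the call sites in this diff.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

// termMatchesPod resolves an affinity term's namespaces and label selector,
// then reports whether targetPod matches both. (Hypothetical wrapper.)
func termMatchesPod(pod, targetPod *v1.Pod, term *v1.PodAffinityTerm) (bool, error) {
	namespaces := schedutil.GetNamespacesFromPodAffinityTerm(pod, term)
	selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
	if err != nil {
		return false, err
	}
	return schedutil.PodMatchesTermsNamespaceAndSelector(targetPod, namespaces, selector), nil
}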
@@ -21,7 +21,7 @@ go_library(
         "//pkg/apis/core/v1/helper/qos:go_default_library",
         "//pkg/apis/extensions:go_default_library",
         "//pkg/apis/scheduling:go_default_library",
-        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
+        "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/scheduling/v1:go_default_library",
@@ -35,7 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
-	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
+	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -445,7 +445,7 @@ func getNonZeroRequests(pod *v1.Pod) Resource {
 	result := Resource{}
 	for i := range pod.Spec.Containers {
 		container := &pod.Spec.Containers[i]
-		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
+		cpu, memory := schedutil.GetNonzeroRequests(&container.Resources.Requests)
 		result.MilliCPU += cpu
 		result.Memory += memory
 	}