Merge pull request #71872 from yuexiao-wang/scheduler-nodeinfo

[scheduler cleanup phase 2]: Rename `pkg/scheduler/cache` to `pkg/scheduler/nodeinfo`
Authored by Kubernetes Prow Robot on 2018-12-12 08:08:33 -08:00; committed by GitHub.
98 changed files with 596 additions and 596 deletions
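
The rename is mechanical: the types, constructors, and method sets are untouched; only the import path and the conventional package alias change. A downstream consumer would update roughly as in this sketch (an illustrative standalone file, not part of the PR):

```go
package main

import (
	"fmt"

	// Before this PR: schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

func main() {
	// Same constructor as before; only the package it lives in moved.
	ni := schedulernodeinfo.NewNodeInfo()
	fmt.Printf("pods tracked: %d\n", len(ni.Pods()))
}
```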


@@ -11,7 +11,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -33,7 +33,7 @@ go_test(
"//pkg/features:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",


@@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/klog"
)
@@ -58,7 +58,7 @@ type schedulerCache struct {
assumedPods map[string]bool
// a map from pod key to podState.
podStates map[string]*podState
nodes map[string]*schedulercache.NodeInfo
nodes map[string]*schedulernodeinfo.NodeInfo
nodeTree *NodeTree
// A map from image name to its imageState.
imageStates map[string]*imageState
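
The nodes field above keeps one aggregated NodeInfo per node name; code that reads it only needs the new alias. A hypothetical helper, shown just to illustrate the renamed types (same import aliases as the hunk above):

```go
// podsOnNode returns the pods aggregated for one node, or nil if the cache
// has no entry for it. The map type matches the nodes field above.
func podsOnNode(nodes map[string]*schedulernodeinfo.NodeInfo, name string) []*v1.Pod {
	if info, ok := nodes[name]; ok {
		return info.Pods()
	}
	return nil
}
```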
@@ -80,8 +80,8 @@ type imageState struct {
}
// createImageStateSummary returns a summarizing snapshot of the given image's state.
func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulercache.ImageStateSummary {
return &schedulercache.ImageStateSummary{
func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulernodeinfo.ImageStateSummary {
return &schedulernodeinfo.ImageStateSummary{
Size: state.size,
NumNodes: len(state.nodes),
}
@@ -93,7 +93,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul
period: period,
stop: stop,
nodes: make(map[string]*schedulercache.NodeInfo),
nodes: make(map[string]*schedulernodeinfo.NodeInfo),
nodeTree: newNodeTree(nil),
assumedPods: make(map[string]bool),
podStates: make(map[string]*podState),
@@ -107,7 +107,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
cache.mu.RLock()
defer cache.mu.RUnlock()
nodes := make(map[string]*schedulercache.NodeInfo)
nodes := make(map[string]*schedulernodeinfo.NodeInfo)
for k, v := range cache.nodes {
nodes[k] = v.Clone()
}
@@ -123,7 +123,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
}
}
func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulercache.NodeInfo) error {
func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo) error {
cache.mu.Lock()
defer cache.mu.Unlock()
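
Callers of UpdateNodeNameToInfoMap pass a map with the new element type and let the cache reconcile it. A hedged sketch of such a caller (Cache is the interface shown later in this diff; the helper itself is illustrative):

```go
// refreshNodeInfo rebuilds the per-node view that the scheduling algorithm
// reads before a scheduling cycle.
func refreshNodeInfo(c Cache) (map[string]*schedulernodeinfo.NodeInfo, error) {
	nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo)
	if err := c.UpdateNodeNameToInfoMap(nodeNameToInfo); err != nil {
		return nil, err
	}
	return nodeNameToInfo, nil
}
```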
@@ -171,7 +171,7 @@ func (cache *schedulerCache) FilteredList(podFilter PodFilter, selector labels.S
}
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod)
key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil {
return err
}
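
GetPodKey moved with the package, so every pod-keyed code path changes only its alias. A sketch of a caller (as far as I can tell the key is derived from the pod UID in this version, so pods that were never persisted are rejected):

```go
// podKey returns the cache key for a pod, or false if the pod cannot be keyed.
func podKey(pod *v1.Pod) (string, bool) {
	key, err := schedulernodeinfo.GetPodKey(pod)
	if err != nil {
		return "", false
	}
	return key, true
}
```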
@@ -197,7 +197,7 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error {
// finishBinding exists to make tests deterministic by injecting now as an argument
func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
key, err := schedulercache.GetPodKey(pod)
key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil {
return err
}
@@ -216,7 +216,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
}
func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod)
key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil {
return err
}
@@ -248,7 +248,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
func (cache *schedulerCache) addPod(pod *v1.Pod) {
n, ok := cache.nodes[pod.Spec.NodeName]
if !ok {
n = schedulercache.NewNodeInfo()
n = schedulernodeinfo.NewNodeInfo()
cache.nodes[pod.Spec.NodeName] = n
}
n.AddPod(pod)
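
The add paths above share a get-or-create step on the nodes map; factored out here for illustration (not a helper the PR introduces):

```go
func getOrCreateNodeInfo(nodes map[string]*schedulernodeinfo.NodeInfo, name string) *schedulernodeinfo.NodeInfo {
	n, ok := nodes[name]
	if !ok {
		n = schedulernodeinfo.NewNodeInfo()
		nodes[name] = n
	}
	return n
}
```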
@@ -276,7 +276,7 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error {
}
func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod)
key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil {
return err
}
@@ -311,7 +311,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
}
func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
key, err := schedulercache.GetPodKey(oldPod)
key, err := schedulernodeinfo.GetPodKey(oldPod)
if err != nil {
return err
}
@@ -339,7 +339,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
}
func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
key, err := schedulercache.GetPodKey(pod)
key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil {
return err
}
@@ -368,7 +368,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
}
func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
key, err := schedulercache.GetPodKey(pod)
key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil {
return false, err
}
@@ -384,7 +384,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
}
func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
key, err := schedulercache.GetPodKey(pod)
key, err := schedulernodeinfo.GetPodKey(pod)
if err != nil {
return nil, err
}
@@ -406,7 +406,7 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error {
n, ok := cache.nodes[node.Name]
if !ok {
n = schedulercache.NewNodeInfo()
n = schedulernodeinfo.NewNodeInfo()
cache.nodes[node.Name] = n
} else {
cache.removeNodeImageStates(n.Node())
@@ -423,7 +423,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
n, ok := cache.nodes[newNode.Name]
if !ok {
n = schedulercache.NewNodeInfo()
n = schedulernodeinfo.NewNodeInfo()
cache.nodes[newNode.Name] = n
} else {
cache.removeNodeImageStates(n.Node())
@@ -457,8 +457,8 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
// addNodeImageStates adds the states of the images on the given node to the given nodeInfo and updates the imageStates in the
// scheduler cache. This function assumes the scheduler cache lock has already been acquired.
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulercache.NodeInfo) {
newSum := make(map[string]*schedulercache.ImageStateSummary)
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulernodeinfo.NodeInfo) {
newSum := make(map[string]*schedulernodeinfo.ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {


@@ -32,10 +32,10 @@ import (
utilfeaturetesting "k8s.io/apiserver/pkg/util/feature/testing"
"k8s.io/kubernetes/pkg/features"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *schedulercache.NodeInfo) {
func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *schedulernodeinfo.NodeInfo) {
// Ignore generation field.
if actual != nil {
actual.SetGeneration(0)
@@ -66,21 +66,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo
return b
}
func (b *hostPortInfoBuilder) build() schedulercache.HostPortInfo {
res := make(schedulercache.HostPortInfo)
func (b *hostPortInfoBuilder) build() schedulernodeinfo.HostPortInfo {
res := make(schedulernodeinfo.HostPortInfo)
for _, param := range b.inputs {
res.Add(param.ip, param.protocol, param.port)
}
return res
}
func newNodeInfo(requestedResource *schedulercache.Resource,
nonzeroRequest *schedulercache.Resource,
func newNodeInfo(requestedResource *schedulernodeinfo.Resource,
nonzeroRequest *schedulernodeinfo.Resource,
pods []*v1.Pod,
usedPorts schedulercache.HostPortInfo,
imageStates map[string]*schedulercache.ImageStateSummary,
) *schedulercache.NodeInfo {
nodeInfo := schedulercache.NewNodeInfo(pods...)
usedPorts schedulernodeinfo.HostPortInfo,
imageStates map[string]*schedulernodeinfo.ImageStateSummary,
) *schedulernodeinfo.NodeInfo {
nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
nodeInfo.SetRequestedResource(requestedResource)
nodeInfo.SetNonZeroRequest(nonzeroRequest)
nodeInfo.SetUsedPorts(usedPorts)
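
The builder is a thin test convenience over schedulernodeinfo.HostPortInfo; using the type directly looks like this (argument order for Add taken from the build() call above):

```go
// declaredPorts builds the same structure the test builder produces.
func declaredPorts() schedulernodeinfo.HostPortInfo {
	used := make(schedulernodeinfo.HostPortInfo)
	used.Add("127.0.0.1", "TCP", 80)
	used.Add("127.0.0.1", "TCP", 8080)
	return used
}
```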
@@ -108,98 +108,98 @@ func TestAssumePodScheduled(t *testing.T) {
tests := []struct {
pods []*v1.Pod
wNodeInfo *schedulercache.NodeInfo
wNodeInfo *schedulernodeinfo.NodeInfo
}{{
pods: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[1], testPods[2]},
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 300,
Memory: 1524,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 300,
Memory: 1524,
},
[]*v1.Pod{testPods[1], testPods[2]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}, { // test non-zero request
pods: []*v1.Pod{testPods[3]},
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 0,
Memory: 0,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: priorityutil.DefaultMilliCPURequest,
Memory: priorityutil.DefaultMemoryRequest,
},
[]*v1.Pod{testPods[3]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[4]},
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[4]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[4], testPods[5]},
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 300,
Memory: 1524,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 300,
Memory: 1524,
},
[]*v1.Pod{testPods[4], testPods[5]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[6]},
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[6]},
newHostPortInfoBuilder().build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
},
}
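
The "test non-zero request" case relies on the cache charging pods that request nothing a small default, so spreading still has something to balance. Conceptually (the defaults are the priorityutil constants referenced above; treat the exact rule as a sketch):

```go
// effectiveRequest applies the non-zero-request rule exercised by that case.
func effectiveRequest(milliCPU, memory int64) (int64, int64) {
	if milliCPU == 0 {
		milliCPU = priorityutil.DefaultMilliCPURequest
	}
	if memory == 0 {
		memory = priorityutil.DefaultMemoryRequest
	}
	return milliCPU, memory
}
```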
@@ -253,7 +253,7 @@ func TestExpirePod(t *testing.T) {
pods []*testExpirePodStruct
cleanupTime time.Time
wNodeInfo *schedulercache.NodeInfo
wNodeInfo *schedulernodeinfo.NodeInfo
}{{ // assumed pod would expire
pods: []*testExpirePodStruct{
{pod: testPods[0], assumedTime: now},
@@ -267,17 +267,17 @@ func TestExpirePod(t *testing.T) {
},
cleanupTime: now.Add(2 * ttl),
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 1024,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 1024,
},
[]*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}}
@@ -313,22 +313,22 @@ func TestAddPodWillConfirm(t *testing.T) {
podsToAssume []*v1.Pod
podsToAdd []*v1.Pod
wNodeInfo *schedulercache.NodeInfo
wNodeInfo *schedulernodeinfo.NodeInfo
}{{ // two pods were assumed at the same time, but only the first one is added via Add() and gets confirmed.
podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
podsToAdd: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}}
@@ -405,25 +405,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
podsToAdd []*v1.Pod
podsToUpdate [][]*v1.Pod
wNodeInfo map[string]*schedulercache.NodeInfo
wNodeInfo map[string]*schedulernodeinfo.NodeInfo
}{{
podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
podsToAdd: []*v1.Pod{addedPod.DeepCopy()},
podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
wNodeInfo: map[string]*schedulercache.NodeInfo{
wNodeInfo: map[string]*schedulernodeinfo.NodeInfo{
"assumed-node": nil,
"actual-node": newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 500,
},
[]*v1.Pod{updatedPod.DeepCopy()},
newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
},
}}
@@ -463,21 +463,21 @@ func TestAddPodAfterExpiration(t *testing.T) {
tests := []struct {
pod *v1.Pod
wNodeInfo *schedulercache.NodeInfo
wNodeInfo *schedulernodeinfo.NodeInfo
}{{
pod: basePod,
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}}
@@ -516,34 +516,34 @@ func TestUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod
wNodeInfo []*schedulercache.NodeInfo
wNodeInfo []*schedulernodeinfo.NodeInfo
}{{ // add a pod and then update it twice
podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo(
&schedulercache.Resource{
wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo(
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 1024,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 1024,
},
[]*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
), newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
)},
}}
@@ -643,35 +643,35 @@ func TestExpireAddUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod
wNodeInfo []*schedulercache.NodeInfo
wNodeInfo []*schedulernodeinfo.NodeInfo
}{{ // Pod is assumed, expired, and added. Then it would be updated twice.
podsToAssume: []*v1.Pod{testPods[0]},
podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulercache.NodeInfo{newNodeInfo(
&schedulercache.Resource{
wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo(
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 1024,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 200,
Memory: 1024,
},
[]*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
), newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
)},
}}
@@ -733,21 +733,21 @@ func TestEphemeralStorageResource(t *testing.T) {
podE := makePodWithEphemeralStorage(nodeName, "500")
tests := []struct {
pod *v1.Pod
wNodeInfo *schedulercache.NodeInfo
wNodeInfo *schedulernodeinfo.NodeInfo
}{
{
pod: podE,
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
EphemeralStorage: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: priorityutil.DefaultMilliCPURequest,
Memory: priorityutil.DefaultMemoryRequest,
},
[]*v1.Pod{podE},
schedulercache.HostPortInfo{},
make(map[string]*schedulercache.ImageStateSummary),
schedulernodeinfo.HostPortInfo{},
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
},
}
@@ -778,21 +778,21 @@ func TestRemovePod(t *testing.T) {
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}})
tests := []struct {
pod *v1.Pod
wNodeInfo *schedulercache.NodeInfo
wNodeInfo *schedulernodeinfo.NodeInfo
}{{
pod: basePod,
wNodeInfo: newNodeInfo(
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulercache.Resource{
&schedulernodeinfo.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulercache.ImageStateSummary),
make(map[string]*schedulernodeinfo.ImageStateSummary),
),
}}
@@ -872,7 +872,7 @@ func TestForgetPod(t *testing.T) {
// getResourceRequest returns the combined resource requests of all containers in the pod,
// excluding initContainers.
func getResourceRequest(pod *v1.Pod) v1.ResourceList {
result := &schedulercache.Resource{}
result := &schedulernodeinfo.Resource{}
for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Requests)
}
@@ -881,13 +881,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList {
}
// buildNodeInfo creates a NodeInfo by simulating node operations in cache.
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulercache.NodeInfo {
expected := schedulercache.NewNodeInfo()
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulernodeinfo.NodeInfo {
expected := schedulernodeinfo.NewNodeInfo()
// Simulate SetNode.
expected.SetNode(node)
expected.SetAllocatableResource(schedulercache.NewResource(node.Status.Allocatable))
expected.SetAllocatableResource(schedulernodeinfo.NewResource(node.Status.Allocatable))
expected.SetTaints(node.Spec.Taints)
expected.SetGeneration(expected.GetGeneration() + 1)
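
Outside of a test, the same calls seed a NodeInfo from a v1.Node; a compressed sketch mirroring buildNodeInfo above (any error from SetNode is ignored here, as in the test):

```go
func seedNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulernodeinfo.NodeInfo {
	ni := schedulernodeinfo.NewNodeInfo(pods...)
	ni.SetNode(node)
	ni.SetAllocatableResource(schedulernodeinfo.NewResource(node.Status.Allocatable))
	ni.SetTaints(node.Spec.Taints)
	return ni
}
```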
@@ -1068,7 +1068,7 @@ func TestNodeOperators(t *testing.T) {
}
// Case 2: dump cached nodes successfully.
cachedNodes := map[string]*schedulercache.NodeInfo{}
cachedNodes := map[string]*schedulernodeinfo.NodeInfo{}
cache.UpdateNodeNameToInfoMap(cachedNodes)
newNode, found := cachedNodes[node.Name]
if !found || len(cachedNodes) != 1 {
@@ -1089,7 +1089,7 @@ func TestNodeOperators(t *testing.T) {
cache.UpdateNode(nil, node)
got, found = cache.nodes[node.Name]
if !found {
t.Errorf("Failed to find node %v in schedulercache after UpdateNode.", node.Name)
t.Errorf("Failed to find node %v in schedulernodeinfo after UpdateNode.", node.Name)
}
if got.GetGeneration() <= expected.GetGeneration() {
t.Errorf("Generation is not incremented. got: %v, expected: %v", got.GetGeneration(), expected.GetGeneration())
@@ -1097,7 +1097,7 @@ func TestNodeOperators(t *testing.T) {
expected.SetGeneration(got.GetGeneration())
if !reflect.DeepEqual(got, expected) {
t.Errorf("Failed to update node in schedulercache:\n got: %+v \nexpected: %+v", got, expected)
t.Errorf("Failed to update node in schedulernodeinfo:\n got: %+v \nexpected: %+v", got, expected)
}
// Check nodeTree after update
if cache.nodeTree.NumNodes() != 1 || cache.nodeTree.Next() != node.Name {
@@ -1131,7 +1131,7 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer()
for n := 0; n < b.N; n++ {
cachedNodes := map[string]*schedulercache.NodeInfo{}
cachedNodes := map[string]*schedulernodeinfo.NodeInfo{}
cache.UpdateNodeNameToInfoMap(cachedNodes)
}
}


@@ -10,9 +10,9 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger",
visibility = ["//pkg/scheduler:__subpackages__"],
deps = [
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
@@ -25,7 +25,7 @@ go_test(
srcs = ["comparer_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
],


@@ -24,9 +24,9 @@ import (
"k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// CacheComparer is an implementation of the Scheduler's cache comparer.
@@ -68,7 +68,7 @@ func (c *CacheComparer) Compare() error {
}
// CompareNodes compares actual nodes with cached nodes.
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) {
actual := []string{}
for _, node := range nodes {
actual = append(actual, node.Name)
@@ -83,7 +83,7 @@ func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*sch
}
// ComparePods compares actual pods with cached pods.
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulercache.NodeInfo) (missed, redundant []string) {
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) {
actual := []string{}
for _, pod := range pods {
actual = append(actual, string(pod.UID))
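
For the node case, the comparison boils down to a set difference between live node names and the keys of the cached map. An illustrative standalone version (not the PR's helper):

```go
func diffNodeKeys(actual []string, cached map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) {
	seen := make(map[string]bool, len(actual))
	for _, name := range actual {
		seen[name] = true
		if _, ok := cached[name]; !ok {
			missed = append(missed, name)
		}
	}
	for name := range cached {
		if !seen[name] {
			redundant = append(redundant, name)
		}
	}
	return missed, redundant
}
```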


@@ -22,7 +22,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
func TestCompareNodes(t *testing.T) {
@@ -72,9 +72,9 @@ func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T)
nodes = append(nodes, node)
}
nodeInfo := make(map[string]*schedulercache.NodeInfo)
nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo)
for _, nodeName := range cached {
nodeInfo[nodeName] = &schedulercache.NodeInfo{}
nodeInfo[nodeName] = &schedulernodeinfo.NodeInfo{}
}
m, r := compare.CompareNodes(nodes, nodeInfo)
@@ -170,14 +170,14 @@ func testComparePods(actual, cached, queued, missing, redundant []string, t *tes
queuedPods = append(queuedPods, pod)
}
nodeInfo := make(map[string]*schedulercache.NodeInfo)
nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo)
for _, uid := range cached {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
pod.Namespace = "ns"
pod.Name = uid
nodeInfo[uid] = schedulercache.NewNodeInfo(pod)
nodeInfo[uid] = schedulernodeinfo.NewNodeInfo(pod)
}
m, r := compare.ComparePods(pods, queuedPods, nodeInfo)


@@ -23,9 +23,9 @@ import (
"k8s.io/klog"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/scheduler/cache"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// CacheDumper writes some information from the scheduler cache and the scheduling queue to the
@@ -61,7 +61,7 @@ func (d *CacheDumper) dumpSchedulingQueue() {
}
// printNodeInfo writes parts of NodeInfo to a string.
func printNodeInfo(n *cache.NodeInfo) string {
func printNodeInfo(n *schedulernodeinfo.NodeInfo) string {
var nodeData strings.Builder
nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nNumber of Pods: %v\nPods:\n",
n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))

View File

@@ -6,8 +6,8 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake",
visibility = ["//pkg/scheduler:__subpackages__"],
deps = [
"//pkg/scheduler/cache:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],


@@ -19,8 +19,8 @@ package fake
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulerinternalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// Cache is used for testing
@@ -75,7 +75,7 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
func (c *Cache) RemoveNode(node *v1.Node) error { return nil }
// UpdateNodeNameToInfoMap is a fake method for testing.
func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
func (c *Cache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error {
return nil
}
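
A cheap way to keep a fake like this from drifting after a rename is a compile-time assertion against the interface it mimics (illustrative; not necessarily present in the tree):

```go
// Fails to compile if the fake stops satisfying the internal cache interface.
var _ schedulerinternalcache.Cache = &Cache{}
```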


@@ -19,7 +19,7 @@ package cache
import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
schedulercache "k8s.io/kubernetes/pkg/scheduler/cache"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// PodFilter is a function to filter a pod. It returns true if the pod passes the filter and false otherwise.
@@ -100,7 +100,7 @@ type Cache interface {
// UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache.
// The node info contains aggregated information about the pods scheduled (or assumed to be scheduled)
// on that node.
UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error
UpdateNodeNameToInfoMap(infoMap map[string]*schedulernodeinfo.NodeInfo) error
// List lists all cached pods (including assumed ones).
List(labels.Selector) ([]*v1.Pod, error)
@@ -118,5 +118,5 @@ type Cache interface {
// Snapshot is a snapshot of cache state
type Snapshot struct {
AssumedPods map[string]bool
Nodes map[string]*schedulercache.NodeInfo
Nodes map[string]*schedulernodeinfo.NodeInfo
}
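
Consumers of a Snapshot now range over map[string]*schedulernodeinfo.NodeInfo. A speculative example of a debugging helper in this package (it would additionally need "fmt"):

```go
// dumpSnapshot prints per-node aggregates from a cache snapshot.
func dumpSnapshot(snap *Snapshot) {
	for name, info := range snap.Nodes {
		fmt.Printf("%s: %d pods, requested %+v\n", name, len(info.Pods()), info.RequestedResource())
	}
}
```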