Added a unit test for node operations in schedulercache.

Klaus Ma 2017-05-04 15:29:45 +08:00
parent f0962765a7
commit fd2575e43e
9 changed files with 324 additions and 28 deletions

View File

@@ -27,6 +27,7 @@ go_library(
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//plugin/pkg/scheduler/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -52,6 +53,7 @@ go_test(
"//plugin/pkg/scheduler/algorithm:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//plugin/pkg/scheduler/testing:go_default_library",
"//plugin/pkg/scheduler/util:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",

View File

@@ -21,6 +21,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
)
type PredicateMetadataFactory struct {
@@ -48,7 +49,7 @@ func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInf
pod: pod,
podBestEffort: isPodBestEffort(pod),
podRequest: GetResourceRequest(pod),
podPorts: GetUsedPorts(pod),
podPorts: schedutil.GetUsedPorts(pod),
matchingAntiAffinityTerms: matchingTerms,
}
for predicateName, precomputeFunc := range predicatePrecomputations {

View File

@@ -40,6 +40,7 @@ import (
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
"k8s.io/metrics/pkg/client/clientset_generated/clientset"
)
@@ -835,7 +836,7 @@ func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No
wantPorts = predicateMeta.podPorts
} else {
// We couldn't parse metadata - fallback to computing it.
wantPorts = GetUsedPorts(pod)
wantPorts = schedutil.GetUsedPorts(pod)
}
if len(wantPorts) == 0 {
return true, nil, nil
@@ -850,24 +851,6 @@ func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No
return true, nil, nil
}
func GetUsedPorts(pods ...*v1.Pod) map[int]bool {
ports := make(map[int]bool)
for _, pod := range pods {
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
for k := range container.Ports {
podPort := &container.Ports[k]
// "0" is explicitly ignored in PodFitsHostPorts,
// which is the only function that uses this value.
if podPort.HostPort != 0 {
ports[int(podPort.HostPort)] = true
}
}
}
}
return ports
}
// search two arrays and return true if they have at least one common element; return false otherwise
func haveSame(a1, a2 []string) bool {
for _, val1 := range a1 {

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing"
schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
)
type FakeNodeInfo v1.Node
@@ -563,7 +564,7 @@ func TestGetUsedPorts(t *testing.T) {
}
for _, test := range tests {
ports := GetUsedPorts(test.pods...)
ports := schedutil.GetUsedPorts(test.pods...)
if !reflect.DeepEqual(test.ports, ports) {
t.Errorf("%s: expected %v, got %v", "test get used ports", test.ports, ports)
}

View File

@@ -42,7 +42,9 @@ go_test(
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/helper:go_default_library",
"//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
"//plugin/pkg/scheduler/util:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",

View File

@@ -26,7 +26,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api/v1"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
)
func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *NodeInfo) {
@@ -437,8 +439,7 @@ func TestRemovePod(t *testing.T) {
nodeName := "node"
basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
tests := []struct {
pod *v1.Pod
pod *v1.Pod
wNodeInfo *NodeInfo
}{{
pod: basePod,
@@ -506,6 +507,265 @@ func TestForgetPod(t *testing.T) {
}
}
// addResource adds ResourceList into Resource.
func addResource(r *Resource, rl v1.ResourceList) {
if r == nil {
return
}
for rName, rQuant := range rl {
switch rName {
case v1.ResourceCPU:
r.MilliCPU += rQuant.MilliValue()
case v1.ResourceMemory:
r.Memory += rQuant.Value()
case v1.ResourceNvidiaGPU:
r.NvidiaGPU += rQuant.Value()
default:
if v1helper.IsOpaqueIntResourceName(rName) {
r.AddOpaque(rName, rQuant.Value())
}
}
}
}
// getResourceRequest returns the resource requests of all regular containers in the Pod,
// excluding initContainers.
func getResourceRequest(pod *v1.Pod) v1.ResourceList {
result := &Resource{}
for _, container := range pod.Spec.Containers {
addResource(result, container.Resources.Requests)
}
return result.ResourceList()
}
// newResource builds a new Resource from the given ResourceList.
func newResource(rl v1.ResourceList) *Resource {
res := &Resource{}
for rName, rQuantity := range rl {
switch rName {
case v1.ResourceMemory:
res.Memory = rQuantity.Value()
case v1.ResourceCPU:
res.MilliCPU = rQuantity.MilliValue()
case v1.ResourceNvidiaGPU:
res.NvidiaGPU += rQuantity.Value()
default:
if v1helper.IsOpaqueIntResourceName(rName) {
res.SetOpaque(rName, rQuantity.Value())
}
}
}
return res
}
// buildNodeInfo creates a NodeInfo by simulating node operations in the cache.
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *NodeInfo {
expected := NewNodeInfo()
// Simulate SetNode.
expected.node = node
expected.allocatableResource = newResource(node.Status.Allocatable)
expected.taints = node.Spec.Taints
expected.generation++
for _, pod := range pods {
// Simulate AddPod
expected.pods = append(expected.pods, pod)
addResource(expected.requestedResource, getResourceRequest(pod))
addResource(expected.nonzeroRequest, getResourceRequest(pod))
expected.usedPorts = schedutil.GetUsedPorts(pod)
expected.generation++
}
return expected
}
// TestNodeOperators tests node operations of the cache, including add, update
// and remove.
func TestNodeOperators(t *testing.T) {
// Test data.
nodeName := "test-node"
cpu_1 := resource.MustParse("1000m")
mem_100m := resource.MustParse("100m")
cpu_half := resource.MustParse("500m")
mem_50m := resource.MustParse("50m")
resourceFooName := "pod.alpha.kubernetes.io/opaque-int-resource-foo"
resourceFoo := resource.MustParse("1")
tests := []struct {
node *v1.Node
pods []*v1.Pod
}{
{
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceCPU: cpu_1,
v1.ResourceMemory: mem_100m,
v1.ResourceName(resourceFooName): resourceFoo,
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: "test-key",
Value: "test-value",
Effect: v1.TaintEffectPreferNoSchedule,
},
},
},
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
},
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu_half,
v1.ResourceMemory: mem_50m,
},
},
Ports: []v1.ContainerPort{
{
Name: "http",
HostPort: 80,
ContainerPort: 80,
},
},
},
},
},
},
},
},
{
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: nodeName,
},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{
v1.ResourceCPU: cpu_1,
v1.ResourceMemory: mem_100m,
v1.ResourceName(resourceFooName): resourceFoo,
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
{
Key: "test-key",
Value: "test-value",
Effect: v1.TaintEffectPreferNoSchedule,
},
},
},
},
pods: []*v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
},
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu_half,
v1.ResourceMemory: mem_50m,
},
},
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
},
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []v1.Container{
{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpu_half,
v1.ResourceMemory: mem_50m,
},
},
},
},
},
},
},
},
}
for _, test := range tests {
expected := buildNodeInfo(test.node, test.pods)
node := test.node
cache := newSchedulerCache(time.Second, time.Second, nil)
cache.AddNode(node)
for _, pod := range test.pods {
cache.AddPod(pod)
}
// Case 1: the node was added into cache successfully.
got, found := cache.nodes[node.Name]
if !found {
t.Errorf("Failed to find node %v in schedulercache.", node.Name)
}
if !reflect.DeepEqual(got, expected) {
t.Errorf("Failed to add node into schedulercache:\n got: %+v \nexpected: %+v", got, expected)
}
// Case 2: dump cached nodes successfully.
cachedNodes := map[string]*NodeInfo{}
cache.UpdateNodeNameToInfoMap(cachedNodes)
newNode, found := cachedNodes[node.Name]
if !found || len(cachedNodes) != 1 {
t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes, cache.nodes)
}
if !reflect.DeepEqual(newNode, expected) {
t.Errorf("Failed to clone node:\n got: %+v, \n expected: %+v", newNode, expected)
}
// Case 3: update node attribute successfully.
node.Status.Allocatable[v1.ResourceMemory] = mem_50m
expected.allocatableResource.Memory = mem_50m.Value()
expected.generation++
cache.UpdateNode(nil, node)
got, found = cache.nodes[node.Name]
if !found {
t.Errorf("Failed to find node %v in schedulercache after UpdateNode.", node.Name)
}
if !reflect.DeepEqual(got, expected) {
t.Errorf("Failed to update node in schedulercache:\n got: %+v \nexpected: %+v", got, expected)
}
// Case 4: the node cannot be removed while it still has pods.
cache.RemoveNode(node)
if _, found := cache.nodes[node.Name]; !found {
t.Errorf("The node %v should not be removed if pods is not empty.", node.Name)
}
}
}
func BenchmarkList1kNodes30kPods(b *testing.B) {
cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer()

View File

@@ -90,9 +90,11 @@ func (r *Resource) Clone() *Resource {
Memory: r.Memory,
NvidiaGPU: r.NvidiaGPU,
}
res.OpaqueIntResources = make(map[v1.ResourceName]int64)
for k, v := range r.OpaqueIntResources {
res.OpaqueIntResources[k] = v
if r.OpaqueIntResources != nil {
res.OpaqueIntResources = make(map[v1.ResourceName]int64)
for k, v := range r.OpaqueIntResources {
res.OpaqueIntResources[k] = v
}
}
return res
}
@@ -220,13 +222,13 @@ func (n *NodeInfo) Clone() *NodeInfo {
taintsErr: n.taintsErr,
memoryPressureCondition: n.memoryPressureCondition,
diskPressureCondition: n.diskPressureCondition,
usedPorts: make(map[int]bool),
generation: n.generation,
}
if len(n.pods) > 0 {
clone.pods = append([]*v1.Pod(nil), n.pods...)
}
if len(n.usedPorts) > 0 {
clone.usedPorts = make(map[int]bool)
for k, v := range n.usedPorts {
clone.usedPorts[k] = v
}
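Note (not part of the diff): the guarded allocations above leave OpaqueIntResources and usedPorts as nil maps when the source NodeInfo never populated them. That detail plausibly matters for the new TestNodeOperators, because reflect.DeepEqual treats a nil map and an allocated-but-empty map as unequal. A minimal standalone Go sketch of that behavior:

	package main

	import (
		"fmt"
		"reflect"
	)

	func main() {
		var nilPorts map[int]bool    // never allocated, as on a node with no host ports in use
		emptyPorts := map[int]bool{} // allocated but empty

		// DeepEqual requires both maps to be nil or both non-nil, so an
		// unconditional make() in Clone() would make a cloned NodeInfo
		// compare unequal to one that never allocated the map.
		fmt.Println(reflect.DeepEqual(nilPorts, emptyPorts)) // false
		fmt.Println(len(nilPorts), len(emptyPorts))          // 0 0
	}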

View File

@@ -18,9 +18,13 @@ go_test(
go_library(
name = "go_default_library",
srcs = ["backoff_utils.go"],
srcs = [
"backoff_utils.go",
"utils.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
],

View File

@@ -0,0 +1,41 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"k8s.io/kubernetes/pkg/api/v1"
)
// GetUsedPorts returns the host ports used by the given Pods: if a port is used, a 'port:true' pair
// appears in the result. It does not resolve port conflicts.
func GetUsedPorts(pods ...*v1.Pod) map[int]bool {
ports := make(map[int]bool)
for _, pod := range pods {
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
for k := range container.Ports {
podPort := &container.Ports[k]
// "0" is explicitly ignored in PodFitsHostPorts,
// which is the only function that uses this value.
if podPort.HostPort != 0 {
ports[int(podPort.HostPort)] = true
}
}
}
}
return ports
}
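For reference, a minimal usage sketch of the relocated helper (illustrative only, not part of this commit; it assumes the import paths shown in the diff above):

	package main

	import (
		"fmt"

		"k8s.io/kubernetes/pkg/api/v1"
		schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
	)

	func main() {
		pod := &v1.Pod{
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Ports: []v1.ContainerPort{
						{HostPort: 80, ContainerPort: 80},
						{HostPort: 0, ContainerPort: 8080}, // HostPort 0 is ignored
					},
				}},
			},
		}
		// Prints: map[80:true]
		fmt.Println(schedutil.GetUsedPorts(pod))
	}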