Merge pull request #48847 from bsalamat/e2e_test_conversion

Automatic merge from submit-queue

Add an integration test library and some integration tests for scheduler

**What this PR does / why we need it**:

1. Add an integration test library (util.go) for scheduler testing.
2. Clean up some of the tests in scheduler_test.go using the new integration test library.
3. Add priorities_test.go with a couple of examples of how to test scheduler priority functions in integration tests (a minimal usage sketch follows below).
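As a quick orientation, here is a minimal sketch of how the helpers added in this PR (`initTest`, `cleanupTest`, `createNodes`, `runPausePod` from the new `util.go` in the scheduler integration test package) are meant to be composed. The test name and the node/pod names below are illustrative only, not part of the PR:

```go
package scheduler

import "testing"

// TestExampleUsage is a hypothetical test showing the helper flow: start an
// apiserver + scheduler, create nodes, run a pause pod, and verify it landed
// on one of the created nodes.
func TestExampleUsage(t *testing.T) {
	// Bring up a test apiserver, a namespace, and a scheduler with the default config.
	context := initTest(t, "example-usage")
	defer cleanupTest(t, context)

	// Create a few nodes with the default (nil) resource list.
	nodes, err := createNodes(context.clientSet, "example-node", nil, 3)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}

	// Create a pause pod and wait until the scheduler assigns it a node.
	pod, err := runPausePod(context.clientSet, &pausePodConfig{
		Name:      "example-pod",
		Namespace: context.ns.Name,
	})
	if err != nil {
		t.Fatalf("Error running pause pod: %v", err)
	}

	// The pod should have been scheduled onto one of the nodes created above.
	for _, n := range nodes {
		if n.Name == pod.Spec.NodeName {
			return
		}
	}
	t.Errorf("Pod %v got scheduled on an unexpected node: %v", pod.Name, pod.Spec.NodeName)
}
```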

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```

ref/ #48176

@kubernetes/sig-scheduling-pr-reviews 
@davidopp @k82cn @vikaschoudhary16
Kubernetes Submit Queue 2017-07-21 08:44:43 -07:00 committed by GitHub
commit e0c537a453
7 changed files with 672 additions and 385 deletions

@@ -83,111 +83,6 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
Expect(err).NotTo(HaveOccurred())
})
It("Pod should be prefer scheduled to node that satisify the NodeAffinity", func() {
nodeName := GetNodeThatCanRunPod(f)
By("Trying to apply a label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", "node-topologyKey")
v := "topologyvalue"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
// make the nodes have balanced cpu,mem usage ratio
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6)
framework.ExpectNoError(err)
By("Trying to relaunch the pod, now with labels.")
labelPodName := "pod-with-node-affinity"
pod := createPausePod(f, pausePodConfig{
Name: labelPodName,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: k,
Operator: v1.NodeSelectorOpIn,
Values: []string{v},
},
},
},
Weight: 20,
},
},
},
},
})
By("Wait the pod becomes running.")
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
By("Verify the pod was scheduled to the expected node.")
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
It("Pod should be schedule to node that satisify the PodAffinity", func() {
By("Trying to launch a pod with a label to get a node which can launch it.")
pod := runPausePod(f, pausePodConfig{
Name: "with-label-security-s1",
Labels: map[string]string{"service": "S1"},
Resources: podRequestedResource,
})
nodeName := pod.Spec.NodeName
By("Trying to apply label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", "node-topologyKey")
v := "topologyvalue"
framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
framework.ExpectNodeHasLabel(cs, nodeName, k, v)
defer framework.RemoveLabelOffNode(cs, nodeName, k)
// make the nodes have balanced cpu,mem usage
err := createBalancedPodForNodes(f, cs, ns, nodeList.Items, podRequestedResource, 0.6)
framework.ExpectNoError(err)
By("Trying to launch the pod, now with podAffinity.")
labelPodName := "pod-with-podaffinity"
pod = createPausePod(f, pausePodConfig{
Resources: podRequestedResource,
Name: labelPodName,
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
{
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "service",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"S1", "value2"},
},
{
Key: "service",
Operator: metav1.LabelSelectorOpNotIn,
Values: []string{"S2"},
}, {
Key: "service",
Operator: metav1.LabelSelectorOpExists,
},
},
},
TopologyKey: k,
Namespaces: []string{ns},
},
Weight: 20,
},
},
},
},
})
By("Wait the pod becomes running.")
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
labelPod, err := cs.CoreV1().Pods(ns).Get(labelPodName, metav1.GetOptions{})
framework.ExpectNoError(err)
By("Verify the pod was scheduled to the expected node.")
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
})
It("Pod should be schedule to node that don't match the PodAntiAffinity terms", func() {
By("Trying to launch a pod with a label to get a node which can launch it.")
pod := runPausePod(f, pausePodConfig{

@@ -14,6 +14,7 @@ go_library(
"master_utils.go",
"perf_utils.go",
"serializer.go",
"util.go",
],
data = [
"@com_coreos_etcd//:etcd",

@@ -0,0 +1,53 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// TODO: This file can potentially be moved to a common place used by both e2e and integration tests.
package framework
import (
"strings"
clientset "k8s.io/client-go/kubernetes"
)
const (
// When these values are updated, also update cmd/kubelet/app/options/options.go
// A copy of these values exist in e2e/framework/util.go.
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
currentPodInfraContainerImageVersion = "3.0"
)
// GetServerArchitecture fetches the architecture of the cluster's apiserver.
func GetServerArchitecture(c clientset.Interface) string {
arch := ""
sVer, err := c.Discovery().ServerVersion()
if err != nil || sVer.Platform == "" {
// If we failed to get the server version for some reason, default to amd64.
arch = "amd64"
} else {
// Split the platform string into OS and Arch separately.
// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
osArchArray := strings.Split(sVer.Platform, "/")
arch = osArchArray[1]
}
return arch
}
// GetPauseImageName fetches the pause image name for the same architecture as the apiserver.
func GetPauseImageName(c clientset.Interface) string {
return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}

@@ -4,6 +4,7 @@ licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
@@ -13,8 +14,10 @@ go_test(
srcs = [
"extender_test.go",
"main_test.go",
"priorities_test.go",
"scheduler_test.go",
],
library = ":go_default_library",
tags = [
"automanaged",
"integration",
@@ -32,8 +35,8 @@ go_test(
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/integration/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@@ -59,3 +62,29 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)
go_library(
name = "go_default_library",
srcs = ["util.go"],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/testapi:go_default_library",
"//plugin/pkg/scheduler:go_default_library",
"//plugin/pkg/scheduler/algorithmprovider:go_default_library",
"//plugin/pkg/scheduler/factory:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/core/v1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
],
)

@@ -0,0 +1,174 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
testutils "k8s.io/kubernetes/test/utils"
)
// This file tests the scheduler priority functions.
// TestNodeAffinity verifies that scheduler's node affinity priority function
// works correctly.
func TestNodeAffinity(t *testing.T) {
context := initTest(t, "node-affinity")
defer cleanupTest(t, context)
// Add a few nodes.
nodes, err := createNodes(context.clientSet, "testnode", nil, 5)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
// Add a label to one of the nodes.
labeledNode := nodes[1]
labelKey := "kubernetes.io/node-topologyKey"
labelValue := "topologyvalue"
labels := map[string]string{
labelKey: labelValue,
}
if err = testutils.AddLabelsToNode(context.clientSet, labeledNode.Name, labels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err)
}
if err = waitForNodeLabels(context.clientSet, labeledNode.Name, labels); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err)
}
// Create a pod with node affinity.
podName := "pod-with-node-affinity"
pod, err := runPausePod(context.clientSet, &pausePodConfig{
Name: podName,
Namespace: context.ns.Name,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
{
Preference: v1.NodeSelectorTerm{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: labelKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{labelValue},
},
},
},
Weight: 20,
},
},
},
},
})
if err != nil {
t.Fatalf("Error running pause pod: %v", err)
}
if pod.Spec.NodeName != labeledNode.Name {
t.Errorf("Pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, labeledNode.Name)
} else {
t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
}
}
// TestPodAffinity verifies that scheduler's pod affinity priority function
// works correctly.
func TestPodAffinity(t *testing.T) {
context := initTest(t, "pod-affinity")
defer cleanupTest(t, context)
// Add a few nodes.
nodesInTopology, err := createNodes(context.clientSet, "in-topology", nil, 5)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
topologyKey := "node-topologykey"
topologyValue := "topologyvalue"
nodeLabels := map[string]string{
topologyKey: topologyValue,
}
for _, node := range nodesInTopology {
// Add topology key to all the nodes.
if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
}
if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
}
}
// Add a pod with a label and wait for it to schedule.
labelKey := "service"
labelValue := "S1"
_, err = runPausePod(context.clientSet, &pausePodConfig{
Name: "attractor-pod",
Namespace: context.ns.Name,
Labels: map[string]string{labelKey: labelValue},
})
if err != nil {
t.Fatalf("Error running the attractor pod: %v", err)
}
// Add a few more nodes without the topology label.
_, err = createNodes(context.clientSet, "other-node", nil, 5)
if err != nil {
t.Fatalf("Cannot create the second set of nodes: %v", err)
}
// Add a new pod with affinity to the attractor pod.
podName := "pod-with-podaffinity"
pod, err := runPausePod(context.clientSet, &pausePodConfig{
Name: podName,
Namespace: context.ns.Name,
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
{
PodAffinityTerm: v1.PodAffinityTerm{
LabelSelector: &metav1.LabelSelector{
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: labelKey,
Operator: metav1.LabelSelectorOpIn,
Values: []string{labelValue, "S3"},
},
{
Key: labelKey,
Operator: metav1.LabelSelectorOpNotIn,
Values: []string{"S2"},
}, {
Key: labelKey,
Operator: metav1.LabelSelectorOpExists,
},
},
},
TopologyKey: topologyKey,
Namespaces: []string{context.ns.Name},
},
Weight: 50,
},
},
},
},
})
if err != nil {
t.Fatalf("Error running pause pod: %v", err)
}
// The new pod must be scheduled on one of the nodes with the same topology
// key-value as the attractor pod.
for _, node := range nodesInTopology {
if node.Name == pod.Spec.NodeName {
t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
return
}
}
t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}

@@ -25,7 +25,6 @@ import (
"k8s.io/api/core/v1"
clientv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@@ -46,7 +45,6 @@ import (
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework"
)
@@ -134,10 +132,10 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
informerFactory.Core().V1().Services(),
eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
)
if err != nil {
t.Fatalf("Error creating scheduler: %v", err)
}
defer close(sched.Config().StopEverything)
// Verify that the config is applied correctly.
schedPredicates := sched.Config().Algorithm.Predicates()
@@ -152,8 +150,6 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
if schedPrioritizers[1].Function == nil || schedPrioritizers[1].Weight != 5 {
t.Errorf("Unexpected prioritizer: func: %v, weight: %v", schedPrioritizers[1].Function, schedPrioritizers[1].Weight)
}
defer close(sched.Config().StopEverything)
}
// TestSchedulerCreationFromNonExistentConfigMap ensures that creation of the
@@ -203,11 +199,11 @@ func TestSchedulerCreationInLegacyMode(t *testing.T) {
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
defer clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.CoreV1().RESTClient()).Events("")})
ss := options.NewSchedulerServer()
ss.HardPodAffinitySymmetricWeight = v1.DefaultHardPodAffinitySymmetricWeight
@@ -225,107 +221,37 @@ func TestSchedulerCreationInLegacyMode(t *testing.T) {
informerFactory.Core().V1().Services(),
eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName}),
)
if err != nil {
t.Fatalf("Creation of scheduler in legacy mode failed: %v", err)
}
informerFactory.Start(sched.Config().StopEverything)
defer close(sched.Config().StopEverything)
sched.Run()
DoTestUnschedulableNodes(t, clientSet, ns, informerFactory.Core().V1().Nodes().Lister())
_, err = createNode(clientSet, "test-node", nil)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
pod, err := createPausePodWithResource(clientSet, "test-pod", "configmap", nil)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
err = waitForPodToSchedule(clientSet, pod)
if err != nil {
t.Errorf("Failed to schedule a pod: %v", err)
} else {
t.Logf("Pod got scheduled on a node.")
}
}
func TestUnschedulableNodes(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
context := initTest(t, "unschedulable-nodes")
defer cleanupTest(t, context)
ns := framework.CreateTestingNamespace("unschedulable-nodes", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
schedulerConfigFactory := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
informerFactory.Start(schedulerConfig.StopEverything)
sched, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
sched.Run()
defer close(schedulerConfig.StopEverything)
DoTestUnschedulableNodes(t, clientSet, ns, schedulerConfigFactory.GetNodeLister())
}
func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.Core().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
}
if pod.Spec.NodeName == "" {
return false, nil
}
return true, nil
}
}
// Wait till the passFunc confirms that the object it expects to see is in the store.
// Used to observe reflected events.
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string, passFunc func(n interface{}) bool) error {
nodes := []*v1.Node{}
err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
n, err := nodeLister.Get(key)
switch {
case err == nil && passFunc(n):
return true, nil
case errors.IsNotFound(err):
nodes = append(nodes, nil)
case err != nil:
t.Errorf("Unexpected error: %v", err)
default:
nodes = append(nodes, n)
}
return false, nil
})
if err != nil {
t.Logf("Logging consecutive node versions received from store:")
for i, n := range nodes {
t.Logf("%d: %#v", i, n)
}
}
return err
}
func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Namespace, nodeLister corelisters.NodeLister) {
nodeLister := context.schedulerConfigFactory.GetNodeLister()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer cs.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
defer context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
goodCondition := v1.NodeCondition{
Type: v1.NodeReady,
@@ -370,7 +296,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
{
makeUnSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
n.Spec.Unschedulable = true
if _, err := c.Core().Nodes().Update(n); err != nil {
if _, err := c.CoreV1().Nodes().Update(n); err != nil {
t.Fatalf("Failed to update node with unschedulable=true: %v", err)
}
err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
@@ -386,7 +312,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
},
makeSchedulable: func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface) {
n.Spec.Unschedulable = false
if _, err := c.Core().Nodes().Update(n); err != nil {
if _, err := c.CoreV1().Nodes().Update(n); err != nil {
t.Fatalf("Failed to update node with unschedulable=false: %v", err)
}
err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
@@ -406,7 +332,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
},
Conditions: []v1.NodeCondition{badCondition},
}
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
if _, err = c.CoreV1().Nodes().UpdateStatus(n); err != nil {
t.Fatalf("Failed to update node with bad status condition: %v", err)
}
err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
@@ -423,7 +349,7 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
},
Conditions: []v1.NodeCondition{goodCondition},
}
if _, err = c.Core().Nodes().UpdateStatus(n); err != nil {
if _, err = c.CoreV1().Nodes().UpdateStatus(n); err != nil {
t.Fatalf("Failed to update node with healthy status condition: %v", err)
}
err = waitForReflection(t, nodeLister, nodeKey, func(node interface{}) bool {
@@ -437,29 +363,23 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
}
for i, mod := range nodeModifications {
unSchedNode, err := cs.Core().Nodes().Create(node)
unSchedNode, err := context.clientSet.Core().Nodes().Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
// Apply the unschedulable modification to the node, and wait for the reflection
mod.makeUnSchedulable(t, unSchedNode, nodeLister, cs)
mod.makeUnSchedulable(t, unSchedNode, nodeLister, context.clientSet)
// Create the new pod, note that this needs to happen post unschedulable
// modification or we have a race in the test.
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "node-scheduling-test-pod"},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(cs)}},
},
}
myPod, err := cs.Core().Pods(ns.Name).Create(pod)
myPod, err := createPausePodWithResource(context.clientSet, "node-scheduling-test-pod", context.ns.Name, nil)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
// There are no schedulable nodes - the pod shouldn't be scheduled.
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
err = waitForPodToSchedule(context.clientSet, myPod)
if err == nil {
t.Errorf("Pod scheduled successfully on unschedulable nodes")
}
@@ -470,25 +390,23 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
}
// Apply the schedulable modification to the node, and wait for the reflection
schedNode, err := cs.Core().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
schedNode, err := context.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get node: %v", err)
}
mod.makeSchedulable(t, schedNode, nodeLister, cs)
mod.makeSchedulable(t, schedNode, nodeLister, context.clientSet)
// Wait until the pod is scheduled.
err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
if err != nil {
if err := waitForPodToSchedule(context.clientSet, myPod); err != nil {
t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
} else {
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
err = cs.Core().Pods(ns.Name).Delete(myPod.Name, metav1.NewDeleteOptions(0))
if err != nil {
// Clean up.
if err := deletePod(context.clientSet, myPod.Name, myPod.Namespace); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
err = cs.Core().Nodes().Delete(schedNode.Name, nil)
err = context.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil)
if err != nil {
t.Errorf("Failed to delete node: %v", err)
}
@@ -496,14 +414,6 @@ func DoTestUnschedulableNodes(t *testing.T, cs clientset.Interface, ns *v1.Names
}
func TestMultiScheduler(t *testing.T) {
_, s, _ := framework.RunAMaster(nil)
// TODO: Uncomment when fix #19254
// This seems to be a different issue - it still doesn't work.
// defer s.Close()
ns := framework.CreateTestingNamespace("multi-scheduler", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
/*
This integration tests the multi-scheduler feature in the following way:
1. create a default scheduler
@@ -523,39 +433,10 @@ func TestMultiScheduler(t *testing.T) {
9. **check point-3**:
- testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
*/
// 1. create and start default-scheduler
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
schedulerConfigFactory := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
informerFactory.Start(schedulerConfig.StopEverything)
sched, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
sched.Run()
// default-scheduler will be stopped later
context := initTest(t, "multi-scheduler")
defer cleanupTest(t, context)
// 2. create a node
node := &v1.Node{
@@ -567,25 +448,22 @@ func TestMultiScheduler(t *testing.T) {
},
},
}
clientSet.Core().Nodes().Create(node)
context.clientSet.Core().Nodes().Create(node)
// 3. create 3 pods for testing
podWithoutSchedulerName := createPod(clientSet, "pod-without-scheduler-name", "")
testPod, err := clientSet.Core().Pods(ns.Name).Create(podWithoutSchedulerName)
testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
schedulerFitsDefault := "default-scheduler"
podFitsDefault := createPod(clientSet, "pod-fits-default", schedulerFitsDefault)
testPodFitsDefault, err := clientSet.Core().Pods(ns.Name).Create(podFitsDefault)
defaultScheduler := "default-scheduler"
testPodFitsDefault, err := createPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: context.ns.Name, SchedulerName: defaultScheduler})
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
schedulerFitsFoo := "foo-scheduler"
podFitsFoo := createPod(clientSet, "pod-fits-foo", schedulerFitsFoo)
testPodFitsFoo, err := clientSet.Core().Pods(ns.Name).Create(podFitsFoo)
fooScheduler := "foo-scheduler"
testPodFitsFoo, err := createPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: context.ns.Name, SchedulerName: fooScheduler})
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
@@ -593,42 +471,39 @@ func TestMultiScheduler(t *testing.T) {
// 4. **check point-1**:
// - testPod, testPodFitsDefault should be scheduled
// - testPodFitsFoo should NOT be scheduled
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPod.Namespace, testPod.Name))
if err != nil {
if err := waitForPodToSchedule(context.clientSet, testPod); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name)
}
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodFitsDefault.Namespace, testPodFitsDefault.Name))
if err != nil {
if err := waitForPodToSchedule(context.clientSet, testPodFitsDefault); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name)
}
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodFitsFoo.Namespace, testPodFitsFoo.Name))
if err == nil {
if err := waitForPodToScheduleWithTimeout(context.clientSet, testPodFitsFoo, time.Second*5); err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name)
}
// 5. create and start a scheduler with name "foo-scheduler"
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informerFactory2 := informers.NewSharedInformerFactory(clientSet, 0)
clientSet2 := clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informerFactory2 := informers.NewSharedInformerFactory(context.clientSet, 0)
schedulerConfigFactory2 := factory.NewConfigFactory(
"foo-scheduler",
fooScheduler,
clientSet2,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
informerFactory2.Core().V1().Nodes(),
informerFactory2.Core().V1().Pods(),
informerFactory2.Core().V1().PersistentVolumes(),
informerFactory2.Core().V1().PersistentVolumeClaims(),
informerFactory2.Core().V1().ReplicationControllers(),
informerFactory2.Extensions().V1beta1().ReplicaSets(),
informerFactory2.Apps().V1beta1().StatefulSets(),
informerFactory2.Core().V1().Services(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
@@ -637,8 +512,8 @@ func TestMultiScheduler(t *testing.T) {
t.Errorf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster2 := record.NewBroadcaster()
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.Scheme, clientv1.EventSource{Component: "foo-scheduler"})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet2.Core().RESTClient()).Events("")})
schedulerConfig2.Recorder = eventBroadcaster2.NewRecorder(api.Scheme, clientv1.EventSource{Component: fooScheduler})
eventBroadcaster2.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet2.CoreV1().RESTClient()).Events("")})
informerFactory2.Start(schedulerConfig2.StopEverything)
sched2, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig2}, nil...)
@@ -647,7 +522,7 @@ func TestMultiScheduler(t *testing.T) {
// 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodFitsFoo.Namespace, testPodFitsFoo.Name))
err = waitForPodToSchedule(context.clientSet, testPodFitsFoo)
if err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err)
} else {
@@ -655,12 +530,10 @@ func TestMultiScheduler(t *testing.T) {
}
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
err = clientSet.Core().Pods(ns.Name).Delete(testPod.Name, metav1.NewDeleteOptions(0))
if err != nil {
if err := deletePod(context.clientSet, testPod.Name, context.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
err = clientSet.Core().Pods(ns.Name).Delete(testPodFitsDefault.Name, metav1.NewDeleteOptions(0))
if err != nil {
if err := deletePod(context.clientSet, testPodFitsDefault.Name, context.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
@@ -703,103 +576,35 @@ func TestMultiScheduler(t *testing.T) {
*/
}
func createPod(client clientset.Interface, name string, scheduler string) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
SchedulerName: scheduler,
},
}
}
// This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not.
func TestAllocatable(t *testing.T) {
_, s, closeFn := framework.RunAMaster(nil)
defer closeFn()
ns := framework.CreateTestingNamespace("allocatable", s, t)
defer framework.DeleteTestingNamespace(ns, s, t)
// 1. create and start default-scheduler
clientSet := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
informerFactory := informers.NewSharedInformerFactory(clientSet, 0)
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer clientSet.Core().Nodes().DeleteCollection(nil, metav1.ListOptions{})
schedulerConfigFactory := factory.NewConfigFactory(
v1.DefaultSchedulerName,
clientSet,
informerFactory.Core().V1().Nodes(),
informerFactory.Core().V1().Pods(),
informerFactory.Core().V1().PersistentVolumes(),
informerFactory.Core().V1().PersistentVolumeClaims(),
informerFactory.Core().V1().ReplicationControllers(),
informerFactory.Extensions().V1beta1().ReplicaSets(),
informerFactory.Apps().V1beta1().StatefulSets(),
informerFactory.Core().V1().Services(),
v1.DefaultHardPodAffinitySymmetricWeight,
enableEquivalenceCache,
)
schedulerConfig, err := schedulerConfigFactory.Create()
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(clientSet.Core().RESTClient()).Events("")})
informerFactory.Start(schedulerConfig.StopEverything)
sched, _ := scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: schedulerConfig}, nil...)
sched.Run()
// default-scheduler will be stopped later
defer close(schedulerConfig.StopEverything)
context := initTest(t, "allocatable")
defer cleanupTest(t, context)
// 2. create a node without allocatable awareness
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
},
},
nodeRes := &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
}
allocNode, err := clientSet.Core().Nodes().Create(node)
allocNode, err := createNode(context.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
// 3. create resource pod which requires less than Capacity
podResource := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-test-allocatable"},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container",
Image: e2e.GetPauseImageName(clientSet),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
},
},
},
},
},
podName := "pod-test-allocatable"
podRes := &v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
}
testAllocPod, err := clientSet.Core().Pods(ns.Name).Create(podResource)
testAllocPod, err := createPausePodWithResource(context.clientSet, podName, context.ns.Name, podRes)
if err != nil {
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
}
// 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod.Namespace, testAllocPod.Name))
err = waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod, time.Second*5)
if err != nil {
t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
} else {
@@ -820,24 +625,23 @@ func TestAllocatable(t *testing.T) {
},
}
if _, err := clientSet.Core().Nodes().UpdateStatus(allocNode); err != nil {
if _, err := context.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
}
if err := clientSet.Core().Pods(ns.Name).Delete(podResource.Name, &metav1.DeleteOptions{}); err != nil {
t.Fatalf("Failed to remove first resource pod: %v", err)
if err := deletePod(context.clientSet, testAllocPod.Name, context.ns.Name); err != nil {
t.Fatalf("Failed to remove the first pod: %v", err)
}
// 6. Make another pod with different name, same resource request
podResource.ObjectMeta.Name = "pod-test-allocatable2"
testAllocPod2, err := clientSet.Core().Pods(ns.Name).Create(podResource)
podName2 := "pod-test-allocatable2"
testAllocPod2, err := createPausePodWithResource(context.clientSet, podName2, context.ns.Name, podRes)
if err != nil {
t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
}
// 7. Test: this test pod should not be scheduled since it request more than Allocatable
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testAllocPod2.Namespace, testAllocPod2.Name))
if err == nil {
if err := waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod2, time.Second*5); err == nil {
t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
} else {
t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)

@@ -0,0 +1,331 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scheduler
import (
"fmt"
"testing"
"time"
"k8s.io/api/core/v1"
clientv1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
informers "k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
corelisters "k8s.io/client-go/listers/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
"k8s.io/kubernetes/test/integration/framework"
"net/http/httptest"
)
type TestContext struct {
closeFn framework.CloseFunc
httpServer *httptest.Server
ns *v1.Namespace
clientSet *clientset.Clientset
informerFactory informers.SharedInformerFactory
schedulerConfigFactory scheduler.Configurator
schedulerConfig *scheduler.Config
scheduler *scheduler.Scheduler
}
// initTest initializes a test environment and creates a scheduler with default
// configuration.
func initTest(t *testing.T, nsPrefix string) *TestContext {
var context TestContext
_, context.httpServer, context.closeFn = framework.RunAMaster(nil)
context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), context.httpServer, t)
context.clientSet = clientset.NewForConfigOrDie(&restclient.Config{Host: context.httpServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: testapi.Groups[v1.GroupName].GroupVersion()}})
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, 0)
context.schedulerConfigFactory = factory.NewConfigFactory(
v1.DefaultSchedulerName,
context.clientSet,
context.informerFactory.Core().V1().Nodes(),
context.informerFactory.Core().V1().Pods(),
context.informerFactory.Core().V1().PersistentVolumes(),
context.informerFactory.Core().V1().PersistentVolumeClaims(),
context.informerFactory.Core().V1().ReplicationControllers(),
context.informerFactory.Extensions().V1beta1().ReplicaSets(),
context.informerFactory.Apps().V1beta1().StatefulSets(),
context.informerFactory.Core().V1().Services(),
v1.DefaultHardPodAffinitySymmetricWeight,
true,
)
var err error
context.schedulerConfig, err = context.schedulerConfigFactory.Create()
if err != nil {
t.Fatalf("Couldn't create scheduler config: %v", err)
}
eventBroadcaster := record.NewBroadcaster()
context.schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.Scheme, clientv1.EventSource{Component: v1.DefaultSchedulerName})
eventBroadcaster.StartRecordingToSink(&clientv1core.EventSinkImpl{Interface: clientv1core.New(context.clientSet.CoreV1().RESTClient()).Events("")})
context.informerFactory.Start(context.schedulerConfig.StopEverything)
context.scheduler, err = scheduler.NewFromConfigurator(&scheduler.FakeConfigurator{Config: context.schedulerConfig}, nil...)
if err != nil {
t.Fatalf("Couldn't create scheduler: %v", err)
}
context.scheduler.Run()
return &context
}
// cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test.
func cleanupTest(t *testing.T, context *TestContext) {
// Kill the scheduler.
close(context.schedulerConfig.StopEverything)
// Cleanup nodes.
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(context.ns, context.httpServer, t)
context.closeFn()
}
// waitForReflection waits till the passFunc confirms that the object it expects
// to see is in the store. Used to observe reflected events.
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string, passFunc func(n interface{}) bool) error {
nodes := []*v1.Node{}
err := wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, func() (bool, error) {
n, err := nodeLister.Get(key)
switch {
case err == nil && passFunc(n):
return true, nil
case errors.IsNotFound(err):
nodes = append(nodes, nil)
case err != nil:
t.Errorf("Unexpected error: %v", err)
default:
nodes = append(nodes, n)
}
return false, nil
})
if err != nil {
t.Logf("Logging consecutive node versions received from store:")
for i, n := range nodes {
t.Logf("%d: %#v", i, n)
}
}
return err
}
// nodeHasLabels returns a function that checks if a node has all the given labels.
func nodeHasLabels(cs clientset.Interface, nodeName string, labels map[string]string) wait.ConditionFunc {
return func() (bool, error) {
node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
}
for k, v := range labels {
if node.Labels == nil || node.Labels[k] != v {
return false, nil
}
}
return true, nil
}
}
// waitForNodeLabels waits for the given node to have all the given labels.
func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[string]string) error {
return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
}
// createNode creates a node with the given resource list and
// returns a pointer and error status. If 'res' is nil, a predefined amount of
// resource will be used.
func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
// if resource is nil, we use a default amount of resources for the node.
if res == nil {
res = &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
}
}
n := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{Unschedulable: false},
Status: v1.NodeStatus{
Capacity: *res,
},
}
return cs.CoreV1().Nodes().Create(n)
}
// createNodes creates `numNodes` nodes. The created node names will be in the
// form of "`prefix`-X" where X is an ordinal.
func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
nodes := make([]*v1.Node, numNodes)
for i := 0; i < numNodes; i++ {
nodeName := fmt.Sprintf("%v-%d", prefix, i)
node, err := createNode(cs, nodeName, res)
if err != nil {
return nodes[:], err
}
nodes[i] = node
}
return nodes[:], nil
}
type pausePodConfig struct {
Name string
Namespace string
Affinity *v1.Affinity
Annotations, Labels, NodeSelector map[string]string
Resources *v1.ResourceRequirements
Tolerations []v1.Toleration
NodeName string
SchedulerName string
}
// initPausePod initializes a pod API object from the given config. It is used
// mainly in pod creation process.
func initPausePod(cs clientset.Interface, conf *pausePodConfig) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: conf.Name,
Labels: conf.Labels,
Annotations: conf.Annotations,
},
Spec: v1.PodSpec{
NodeSelector: conf.NodeSelector,
Affinity: conf.Affinity,
Containers: []v1.Container{
{
Name: conf.Name,
Image: framework.GetPauseImageName(cs),
},
},
Tolerations: conf.Tolerations,
NodeName: conf.NodeName,
SchedulerName: conf.SchedulerName,
},
}
if conf.Resources != nil {
pod.Spec.Containers[0].Resources = *conf.Resources
}
return pod
}
// createPausePod creates a pod with "Pause" image and the given config and
// return its pointer and error status.
func createPausePod(cs clientset.Interface, conf *pausePodConfig) (*v1.Pod, error) {
p := initPausePod(cs, conf)
return cs.CoreV1().Pods(conf.Namespace).Create(p)
}
// createPausePodWithResource creates a pod with "Pause" image and the given
// resources and returns its pointer and error status. The resource list can be
// nil.
func createPausePodWithResource(cs clientset.Interface, podName string, nsName string, res *v1.ResourceList) (*v1.Pod, error) {
var conf pausePodConfig
if res == nil {
conf = pausePodConfig{
Name: podName,
Namespace: nsName,
}
} else {
conf = pausePodConfig{
Name: podName,
Namespace: nsName,
Resources: &v1.ResourceRequirements{
Requests: *res,
},
}
}
return createPausePod(cs, &conf)
}
// runPausePod creates a pod with "Pause" image and the given config and waits
// until it is scheduled. It returns its pointer and error status.
func runPausePod(cs clientset.Interface, conf *pausePodConfig) (*v1.Pod, error) {
p := initPausePod(cs, conf)
pod, err := cs.CoreV1().Pods(conf.Namespace).Create(p)
if err != nil {
return nil, fmt.Errorf("Error creating pause pod: %v", err)
}
if err = waitForPodToSchedule(cs, pod); err != nil {
return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
}
if pod, err = cs.CoreV1().Pods(conf.Namespace).Get(conf.Name, metav1.GetOptions{}); err != nil {
return pod, fmt.Errorf("Error getting pod %v info: %v", conf.Name, err)
}
return pod, nil
}
// podScheduled returns true if a node is assigned to the given pod.
func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
return func() (bool, error) {
pod, err := c.CoreV1().Pods(podNamespace).Get(podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
return false, nil
}
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
}
if pod.Spec.NodeName == "" {
return false, nil
}
return true, nil
}
}
// waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
// an error if it does not scheduled within the given timeout.
func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
return wait.Poll(time.Second, timeout, podScheduled(cs, pod.Namespace, pod.Name))
}
// waitForPodToSchedule waits for a pod to get scheduled and returns an error if
// it does not scheduled within the timeout duration (30 seconds).
func waitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
return waitForPodToScheduleWithTimeout(cs, pod, wait.ForeverTestTimeout)
}
// deletePod deletes the given pod in the given namespace.
func deletePod(cs clientset.Interface, podName string, nsName string) error {
return cs.CoreV1().Pods(nsName).Delete(podName, metav1.NewDeleteOptions(0))
}
// printAllPods prints a list of all the pods and their node names. This is used
// for debugging.
func printAllPods(t *testing.T, cs clientset.Interface, nsName string) {
podList, err := cs.CoreV1().Pods(nsName).List(metav1.ListOptions{})
if err != nil {
t.Logf("Error getting pods: %v", err)
}
for _, pod := range podList.Items {
t.Logf("Pod:\n\tName:%v\n\tNamespace:%v\n\tNode Name:%v\n", pod.Name, pod.Namespace, pod.Spec.NodeName)
}
}