Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 03:11:40 +00:00
Converted a couple of e2e priority tests to integration tests.
This commit is contained in:
parent 684ac3f665
commit 238cb5bfc1
174 test/integration/scheduler/priorities_test.go (Normal file)
@@ -0,0 +1,174 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	testutils "k8s.io/kubernetes/test/utils"
)

// This file tests the scheduler priority functions.

// TestNodeAffinity verifies that scheduler's node affinity priority function
// works correctly.
func TestNodeAffinity(t *testing.T) {
	context := initTest(t, "node-affinity")
	defer cleanupTest(t, context)
	// Add a few nodes.
	nodes, err := createNodes(context.clientSet, "testnode", 5)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}
	// Add a label to one of the nodes.
	labeledNode := nodes[1]
	labelKey := "kubernetes.io/node-topologyKey"
	labelValue := "topologyvalue"
	labels := map[string]string{
		labelKey: labelValue,
	}
	if err = testutils.AddLabelsToNode(context.clientSet, labeledNode.Name, labels); err != nil {
		t.Fatalf("Cannot add labels to node: %v", err)
	}
	if err = waitForNodeLabels(context.clientSet, labeledNode.Name, labels); err != nil {
		t.Fatalf("Adding labels to node didn't succeed: %v", err)
	}
	// Create a pod with node affinity.
	podName := "pod-with-node-affinity"
	pod, err := runPausePod(context.clientSet, &pausePodConfig{
		Name: podName,
		Namespace: context.ns.Name,
		Affinity: &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
					{
						Preference: v1.NodeSelectorTerm{
							MatchExpressions: []v1.NodeSelectorRequirement{
								{
									Key: labelKey,
									Operator: v1.NodeSelectorOpIn,
									Values: []string{labelValue},
								},
							},
						},
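						// Weight of a preferred scheduling term is in the range 1-100;
						// higher values make matching nodes score proportionally better.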
						Weight: 20,
					},
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("Error running pause pod: %v", err)
	}
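	// Only labeledNode carries the preferred label, so the node affinity
	// priority should place the pod on it.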
	if pod.Spec.NodeName != labeledNode.Name {
		t.Errorf("Pod %v got scheduled on an unexpected node: %v. Expected node: %v.", podName, pod.Spec.NodeName, labeledNode.Name)
	} else {
		t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
	}
}

// TestPodAffinity verifies that scheduler's pod affinity priority function
// works correctly.
func TestPodAffinity(t *testing.T) {
	context := initTest(t, "pod-affinity")
	defer cleanupTest(t, context)
	// Add a few nodes.
	nodesInTopology, err := createNodes(context.clientSet, "in-topology", 5)
	if err != nil {
		t.Fatalf("Cannot create nodes: %v", err)
	}
	topologyKey := "node-topologykey"
	topologyValue := "topologyvalue"
	nodeLabels := map[string]string{
		topologyKey: topologyValue,
	}
	for _, node := range nodesInTopology {
		// Add topology key to all the nodes.
		if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil {
			t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
		}
		if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil {
			t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
		}
	}
	// Add a pod with a label and wait for it to schedule.
	labelKey := "service"
	labelValue := "S1"
	_, err = runPausePod(context.clientSet, &pausePodConfig{
		Name: "attractor-pod",
		Namespace: context.ns.Name,
		Labels: map[string]string{labelKey: labelValue},
	})
	if err != nil {
		t.Fatalf("Error running the attractor pod: %v", err)
	}
	// Add a few more nodes without the topology label.
	_, err = createNodes(context.clientSet, "other-node", 5)
	if err != nil {
		t.Fatalf("Cannot create the second set of nodes: %v", err)
	}
	// Add a new pod with affinity to the attractor pod.
	podName := "pod-with-podaffinity"
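	// The match expressions below are ANDed together; the attractor pod's
	// "service=S1" label satisfies all of them (In {S1, S3}, NotIn {S2}, Exists).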
	pod, err := runPausePod(context.clientSet, &pausePodConfig{
		Name: podName,
		Namespace: context.ns.Name,
		Affinity: &v1.Affinity{
			PodAffinity: &v1.PodAffinity{
				PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
					{
						PodAffinityTerm: v1.PodAffinityTerm{
							LabelSelector: &metav1.LabelSelector{
								MatchExpressions: []metav1.LabelSelectorRequirement{
									{
										Key: labelKey,
										Operator: metav1.LabelSelectorOpIn,
										Values: []string{labelValue, "S3"},
									},
									{
										Key: labelKey,
										Operator: metav1.LabelSelectorOpNotIn,
										Values: []string{"S2"},
									}, {
										Key: labelKey,
										Operator: metav1.LabelSelectorOpExists,
									},
								},
							},
							TopologyKey: topologyKey,
							Namespaces: []string{context.ns.Name},
						},
						Weight: 50,
					},
				},
			},
		},
	})
	if err != nil {
		t.Fatalf("Error running pause pod: %v", err)
	}
	// The new pod must be scheduled on one of the nodes with the same topology
	// key-value as the attractor pod.
	for _, node := range nodesInTopology {
		if node.Name == pod.Spec.NodeName {
			t.Logf("Pod %v got successfully scheduled on node %v.", podName, pod.Spec.NodeName)
			return
		}
	}
	t.Errorf("Pod %v got scheduled on an unexpected node: %v.", podName, pod.Spec.NodeName)
}
@@ -184,7 +184,7 @@ func createNode(cs clientset.Interface, name string) (*v1.Node, error) {
 	return createNodeWithResource(cs, name, &res)
 }
 
-// createNodes created `numNodes` nodes. The created node names will be in the
+// createNodes creates `numNodes` nodes. The created node names will be in the
 // form of "`prefix`-X" where X is an ordinal.
 func createNodes(cs clientset.Interface, prefix string, numNodes int) ([]*v1.Node, error) {
 	nodes := make([]*v1.Node, numNodes)
@@ -247,7 +247,7 @@ func createPausePod(cs clientset.Interface, conf *pausePodConfig) (*v1.Pod, erro
 }
 
 // createPausePodWithResource creates a pod with "Pause" image and the given
-// resources and return its pointer and error status.
+// resources and returns its pointer and error status.
 func createPausePodWithResource(cs clientset.Interface, podName string, nsName string, res *v1.ResourceList) (*v1.Pod, error) {
 	conf := pausePodConfig{
 		Name: podName,
@@ -259,7 +259,7 @@ func createPausePodWithResource(cs clientset.Interface, podName string, nsName s
 	return createPausePod(cs, &conf)
 }
 
-// createDefaultPausePod creates a pod with "Pause" image and return its pointer
+// createDefaultPausePod creates a pod with "Pause" image and returns its pointer
 // and error status.
 func createDefaultPausePod(cs clientset.Interface, podName string, nsName string) (*v1.Pod, error) {
 	conf := pausePodConfig{