Mirror of https://github.com/k3s-io/kubernetes.git
Use NodeWrapper to directly initialize node with labels
Using NodeWrapper in the integration tests gives more flexibility when creating nodes. For instance, tests can create nodes with labels or with a specific set of resources. Also, NodeWrapper initializes a node with a capacity of 32 pods, which can be overridden by the caller. This ensures that a node is usable as soon as it is created.
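For illustration, this is the pattern the change enables in the integration tests, sketched with placeholder names and label values (`createNodes` is the test helper updated in this commit; `st` is `k8s.io/kubernetes/pkg/scheduler/testing`):

```go
// Nodes are created fully initialized: name prefix, labels, and a
// default capacity of 32 pods, with no follow-up label patching.
nodes, err := createNodes(
	testCtx.ClientSet,
	"testnode",
	st.MakeNode().Label("region", "r1").Label("zone", "z11"),
	2,
)
if err != nil {
	t.Fatalf("Cannot create nodes: %v", err)
}
```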
parent 908847c01e
commit 2e1042f959
@@ -19,6 +19,7 @@ go_library(
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
+       "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
@@ -19,7 +19,8 @@ package testing
 import (
 	"fmt"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
@@ -362,7 +363,8 @@ type NodeWrapper struct{ v1.Node }

 // MakeNode creates a Node wrapper.
 func MakeNode() *NodeWrapper {
-	return &NodeWrapper{v1.Node{}}
+	w := &NodeWrapper{v1.Node{}}
+	return w.Capacity(nil)
 }

 // Obj returns the inner Node.
@@ -390,3 +392,28 @@ func (n *NodeWrapper) Label(k, v string) *NodeWrapper {
 	n.Labels[k] = v
 	return n
 }
+
+// Capacity sets the capacity and the allocatable resources of the inner node.
+// Each entry in `resources` corresponds to a resource name and its quantity.
+// By default, the capacity and allocatable number of pods are set to 32.
+func (n *NodeWrapper) Capacity(resources map[v1.ResourceName]string) *NodeWrapper {
+	res := v1.ResourceList{
+		v1.ResourcePods: resource.MustParse("32"),
+	}
+	for name, value := range resources {
+		res[name] = resource.MustParse(value)
+	}
+	n.Status.Capacity, n.Status.Allocatable = res, res
+	return n
+}
+
+// Images sets the images of the inner node. Each entry in `images` corresponds
+// to an image name and its size in bytes.
+func (n *NodeWrapper) Images(images map[string]int64) *NodeWrapper {
+	var containerImages []v1.ContainerImage
+	for name, size := range images {
+		containerImages = append(containerImages, v1.ContainerImage{Names: []string{name}, SizeBytes: size})
+	}
+	n.Status.Images = containerImages
+	return n
+}
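Since `MakeNode` now routes through `Capacity(nil)`, every wrapper-built node starts with a capacity and allocatable of 32 pods and is schedulable immediately; caller-supplied entries are merged over that default and may replace it. A minimal sketch of the resulting API (node names and quantities here are illustrative only):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func exampleNodes() []*v1.Node {
	// Default: capacity/allocatable of 32 pods, nothing else set.
	small := st.MakeNode().Name("node-small").Obj()

	// Explicit entries are merged over the default, so the pod count
	// itself can be overridden and CPU/memory added in one call.
	big := st.MakeNode().Name("node-big").Capacity(map[v1.ResourceName]string{
		v1.ResourcePods:   "64", // replaces the default of 32
		v1.ResourceCPU:    "4",
		v1.ResourceMemory: "8Gi",
	}).Obj()

	return []*v1.Node{small, big}
}
```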
@@ -86,10 +86,10 @@ go_library(
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/controller/disruption:go_default_library",
        "//pkg/scheduler:go_default_library",
+       "//pkg/scheduler/testing:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testutils "k8s.io/kubernetes/test/integration/util"
 )

@@ -1130,7 +1131,7 @@ func TestBindPlugin(t *testing.T) {
 	defer testutils.CleanupTest(t, testCtx)

 	// Add a few nodes.
-	_, err := createNodes(testCtx.ClientSet, "test-node", nil, 2)
+	_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode(), 2)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -1776,12 +1777,12 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 	defer testutils.CleanupTest(t, testCtx)

 	// Add one node.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNodes(testCtx.ClientSet, "test-node", nodeRes, 1)
+	_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode().Capacity(nodeRes), 1)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -1841,7 +1842,7 @@ func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestCont
 	go testCtx.Scheduler.Run(testCtx.Ctx)

 	if nodeCount > 0 {
-		_, err := createNodes(testCtx.ClientSet, "test-node", nil, nodeCount)
+		_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode(), nodeCount)
 		if err != nil {
 			t.Fatalf("Cannot create nodes: %v", err)
 		}
@@ -24,7 +24,6 @@ import (

 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
@@ -43,25 +42,12 @@ const pollInterval = 100 * time.Millisecond
 func TestInterPodAffinity(t *testing.T) {
 	testCtx := initTest(t, "inter-pod-affinity")
 	defer testutils.CleanupTest(t, testCtx)
-	// Add a few nodes.
-	nodes, err := createNodes(testCtx.ClientSet, "testnode", nil, 2)
+
+	// Add a few nodes with labels
+	nodes, err := createNodes(testCtx.ClientSet, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
-	// Add labels to the nodes.
-	labels1 := map[string]string{
-		"region": "r1",
-		"zone":   "z11",
-	}
-	for _, node := range nodes {
-		// TODO(nodo): Use PodWrapper to directly initialize node with labels.
-		if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, labels1); err != nil {
-			t.Fatalf("Cannot add labels to node: %v", err)
-		}
-		if err = waitForNodeLabels(testCtx.ClientSet, node.Name, labels1); err != nil {
-			t.Fatalf("Adding labels to node didn't succeed: %v", err)
-		}
-	}

 	cs := testCtx.ClientSet
 	podLabel := map[string]string{"service": "securityscan"}
@@ -886,7 +872,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
 	ns := testCtx.NS.Name
 	defer testutils.CleanupTest(t, testCtx)
 	// Add 4 nodes.
-	nodes, err := createNodes(cs, "node", nil, 4)
+	nodes, err := createNodes(cs, "node", st.MakeNode(), 4)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -896,7 +882,6 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
 			"zone": fmt.Sprintf("zone-%d", i/2),
 			"node": node.Name,
 		}
-		// TODO(nodo): Use PodWrapper to directly initialize node with labels.
 		if err = utils.AddLabelsToNode(cs, node.Name, labels); err != nil {
 			t.Fatalf("Cannot add labels to node: %v", err)
 		}
@@ -1051,7 +1036,7 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 				Name: "pod-1",
 			},
 			update: func(cs kubernetes.Interface, _ string) error {
-				_, err := createNode(cs, "node-added", nil)
+				_, err := createNode(cs, st.MakeNode().Name("node-added").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
@@ -1061,7 +1046,7 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "node gets taint removed",
 			init: func(cs kubernetes.Interface, _ string) error {
-				node, err := createNode(cs, "node-tainted", nil)
+				node, err := createNode(cs, st.MakeNode().Name("node-tainted").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
@@ -1085,10 +1070,8 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "other pod gets deleted",
 			init: func(cs kubernetes.Interface, ns string) error {
-				nodeResources := &v1.ResourceList{
-					v1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
-				}
-				_, err := createNode(cs, "node-scheduler-integration-test", nodeResources)
+				nodeObject := st.MakeNode().Name("node-scheduler-integration-test").Capacity(map[v1.ResourceName]string{v1.ResourcePods: "1"}).Obj()
+				_, err := createNode(cs, nodeObject)
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
@@ -1111,14 +1094,10 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "pod with pod-affinity gets added",
 			init: func(cs kubernetes.Interface, _ string) error {
-				node, err := createNode(cs, "node-1", nil)
+				_, err := createNode(cs, st.MakeNode().Name("node-1").Label("region", "test").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
-				// TODO(nodo): Use PodWrapper to directly initialize node with labels.
-				if err := utils.AddLabelsToNode(cs, node.Name, map[string]string{"region": "test"}); err != nil {
-					return fmt.Errorf("cannot add labels to node: %v", err)
-				}
 				return nil
 			},
 			pod: &pausePodConfig{
@@ -1155,14 +1134,10 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "scheduled pod gets updated to match affinity",
 			init: func(cs kubernetes.Interface, ns string) error {
-				node, err := createNode(cs, "node-1", nil)
+				_, err := createNode(cs, st.MakeNode().Name("node-1").Label("region", "test").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
-				// TODO(nodo): Use PodWrapper to directly initialize node with labels.
-				if err := utils.AddLabelsToNode(cs, node.Name, map[string]string{"region": "test"}); err != nil {
-					return fmt.Errorf("cannot add labels to node: %v", err)
-				}
 				if _, err := createPausePod(cs, initPausePod(&pausePodConfig{Name: "pod-to-be-updated", Namespace: ns})); err != nil {
 					return fmt.Errorf("cannot create pod: %v", err)
 				}
@@ -47,9 +47,9 @@ import (
 	schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	"k8s.io/kubernetes/plugin/pkg/admission/priority"
 	testutils "k8s.io/kubernetes/test/integration/util"
-	"k8s.io/kubernetes/test/utils"
 )

 var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
@@ -379,21 +379,14 @@ func TestPreemption(t *testing.T) {
 	}

 	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	node, err := createNode(testCtx.ClientSet, "node1", nodeRes)
-	if err != nil {
-		t.Fatalf("Error creating nodes: %v", err)
-	}
-	nodeLabels := map[string]string{"node": node.Name}
-	if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-		t.Fatalf("Cannot add labels to node: %v", err)
-	}
-	if err = waitForNodeLabels(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-		t.Fatalf("Adding labels to node didn't succeed: %v", err)
+	nodeObject := st.MakeNode().Name("node1").Capacity(nodeRes).Label("node", "node1").Obj()
+	if _, err := createNode(testCtx.ClientSet, nodeObject); err != nil {
+		t.Fatalf("Error creating node: %v", err)
 	}

 	for _, test := range tests {
@@ -481,13 +474,13 @@ func TestNonPreemption(t *testing.T) {
 		},
 	})

-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -557,13 +550,13 @@ func TestDisablePreemption(t *testing.T) {
 		},
 	}

-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -664,13 +657,13 @@ func TestPodPriorityResolution(t *testing.T) {
 		},
 	}

-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -754,13 +747,13 @@ func TestPreemptionStarvation(t *testing.T) {
 		},
 	}

-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -855,13 +848,13 @@ func TestPreemptionRaces(t *testing.T) {
 		},
 	}

-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(100, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(5000, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI),
+	// Create a node with some resources
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "100",
+		v1.ResourceCPU:    "5000m",
+		v1.ResourceMemory: "5000",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -954,13 +947,13 @@ func TestNominatedNodeCleanUp(t *testing.T) {

 	defer cleanupPodsInNamespace(cs, t, testCtx.NS.Name)

-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -1069,15 +1062,15 @@ func TestPDBInPreemption(t *testing.T) {
 			v1.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
 	}
-	defaultNodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	defaultNodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}

 	type nodeConfig struct {
 		name string
-		res  *v1.ResourceList
+		res  map[v1.ResourceName]string
 	}

 	tests := []struct {
@@ -1253,7 +1246,7 @@ func TestPDBInPreemption(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
-				_, err := createNode(cs, nodeConf.name, nodeConf.res)
+				_, err := createNode(cs, st.MakeNode().Name(nodeConf.name).Capacity(nodeConf.res).Obj())
 				if err != nil {
 					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
 				}
@@ -40,7 +40,7 @@ func TestNodeAffinity(t *testing.T) {
 	testCtx := initTest(t, "node-affinity")
 	defer testutils.CleanupTest(t, testCtx)
 	// Add a few nodes.
-	nodes, err := createNodes(testCtx.ClientSet, "testnode", nil, 5)
+	nodes, err := createNodes(testCtx.ClientSet, "testnode", st.MakeNode(), 5)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -97,23 +97,11 @@ func TestPodAffinity(t *testing.T) {
 	testCtx := initTest(t, "pod-affinity")
 	defer testutils.CleanupTest(t, testCtx)
 	// Add a few nodes.
-	nodesInTopology, err := createNodes(testCtx.ClientSet, "in-topology", nil, 5)
-	if err != nil {
-		t.Fatalf("Cannot create nodes: %v", err)
-	}
 	topologyKey := "node-topologykey"
 	topologyValue := "topologyvalue"
-	nodeLabels := map[string]string{
-		topologyKey: topologyValue,
-	}
-	for _, node := range nodesInTopology {
-		// Add topology key to all the nodes.
-		if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-			t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
-		}
-		if err = waitForNodeLabels(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-			t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
-		}
+	nodesInTopology, err := createNodes(testCtx.ClientSet, "in-topology", st.MakeNode().Label(topologyKey, topologyValue), 5)
+	if err != nil {
+		t.Fatalf("Cannot create nodes: %v", err)
 	}
 	// Add a pod with a label and wait for it to schedule.
 	labelKey := "service"
@@ -127,7 +115,7 @@ func TestPodAffinity(t *testing.T) {
 		t.Fatalf("Error running the attractor pod: %v", err)
 	}
 	// Add a few more nodes without the topology label.
-	_, err = createNodes(testCtx.ClientSet, "other-node", nil, 5)
+	_, err = createNodes(testCtx.ClientSet, "other-node", st.MakeNode(), 5)
 	if err != nil {
 		t.Fatalf("Cannot create the second set of nodes: %v", err)
 	}
@@ -187,22 +175,20 @@ func TestImageLocality(t *testing.T) {
 	testCtx := initTest(t, "image-locality")
 	defer testutils.CleanupTest(t, testCtx)

-	// We use a fake large image as the test image used by the pod, which has relatively large image size.
-	image := v1.ContainerImage{
-		Names: []string{
-			"fake-large-image:v1",
-		},
-		SizeBytes: 3000 * 1024 * 1024,
-	}
-
-	// Create a node with the large image.
-	nodeWithLargeImage, err := createNodeWithImages(testCtx.ClientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
+	// We use a fake large image as the test image used by the pod, which has
+	// relatively large image size.
+	imageName := "fake-large-image:v1"
+	nodeWithLargeImage, err := createNode(
+		testCtx.ClientSet,
+		st.MakeNode().Name("testnode-large-image").Images(map[string]int64{imageName: 3000 * 1024 * 1024}).Obj(),
+	)
 	if err != nil {
 		t.Fatalf("cannot create node with a large image: %v", err)
 	}

 	// Add a few nodes.
-	_, err = createNodes(testCtx.ClientSet, "testnode", nil, 10)
+	_, err = createNodes(testCtx.ClientSet, "testnode", st.MakeNode(), 10)
 	if err != nil {
 		t.Fatalf("cannot create nodes: %v", err)
 	}
@@ -212,7 +198,7 @@ func TestImageLocality(t *testing.T) {
 	pod, err := runPodWithContainers(testCtx.ClientSet, initPodWithContainers(testCtx.ClientSet, &podWithContainersConfig{
 		Name:       podName,
 		Namespace:  testCtx.NS.Name,
-		Containers: makeContainersWithImages(image.Names),
+		Containers: makeContainersWithImages([]string{imageName}),
 	}))
 	if err != nil {
 		t.Fatalf("error running pod with images: %v", err)
@@ -249,7 +235,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) {
 	ns := testCtx.NS.Name
 	defer testutils.CleanupTest(t, testCtx)
 	// Add 4 nodes.
-	nodes, err := createNodes(cs, "node", nil, 4)
+	nodes, err := createNodes(cs, "node", st.MakeNode(), 4)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -40,6 +40,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler"
 	kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/profile"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/integration/util"
 )
@@ -697,12 +698,12 @@ func TestAllocatable(t *testing.T) {
 	defer testutils.CleanupTest(t, testCtx)

 	// 2. create a node without allocatable awareness
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "30m",
+		v1.ResourceMemory: "30",
 	}
-	allocNode, err := createNode(testCtx.ClientSet, "node-allocatable-scheduler-test-node", nodeRes)
+	allocNode, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node-allocatable-scheduler-test-node").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Failed to create node: %v", err)
 	}
@@ -775,15 +776,15 @@ func TestSchedulerInformers(t *testing.T) {
 			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
 	}
-	defaultNodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
+	defaultNodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}

 	type nodeConfig struct {
 		name string
-		res  *v1.ResourceList
+		res  map[v1.ResourceName]string
 	}

 	tests := []struct {
@@ -826,7 +827,7 @@ func TestSchedulerInformers(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
-				_, err := createNode(cs, nodeConf.name, nodeConf.res)
+				_, err := createNode(cs, st.MakeNode().Name(nodeConf.name).Capacity(nodeConf.res).Obj())
 				if err != nil {
 					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
 				}
@@ -25,7 +25,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -40,6 +39,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller/disruption"
 	"k8s.io/kubernetes/pkg/scheduler"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testutils "k8s.io/kubernetes/test/integration/util"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -148,44 +148,17 @@ func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[strin
 	return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
 }

-// initNode returns a node with the given resource list and images. If 'res' is nil, a predefined amount of
-// resource will be used.
-func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1.Node {
-	// if resource is nil, we use a default amount of resources for the node.
-	if res == nil {
-		res = &v1.ResourceList{
-			v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
-		}
-	}
-
-	n := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{Name: name},
-		Spec:       v1.NodeSpec{Unschedulable: false},
-		Status: v1.NodeStatus{
-			Capacity: *res,
-			Images:   images,
-		},
-	}
-	return n
-}
-
-// createNode creates a node with the given resource list.
-func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
-	return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, nil), metav1.CreateOptions{})
-}
-
-// createNodeWithImages creates a node with the given resource list and images.
-func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) {
-	return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, images), metav1.CreateOptions{})
+func createNode(cs clientset.Interface, node *v1.Node) (*v1.Node, error) {
+	return cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
 }

 // createNodes creates `numNodes` nodes. The created node names will be in the
 // form of "`prefix`-X" where X is an ordinal.
-func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
+func createNodes(cs clientset.Interface, prefix string, wrapper *st.NodeWrapper, numNodes int) ([]*v1.Node, error) {
 	nodes := make([]*v1.Node, numNodes)
 	for i := 0; i < numNodes; i++ {
 		nodeName := fmt.Sprintf("%v-%d", prefix, i)
-		node, err := createNode(cs, nodeName, res)
+		node, err := createNode(cs, wrapper.Name(nodeName).Obj())
 		if err != nil {
 			return nodes[:], err
 		}
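Worth noting for test authors: the new `createNodes` reuses the single wrapper it is given, calling `wrapper.Name(nodeName)` on each iteration, so the created nodes differ only in name while sharing the wrapper's labels and capacity. A hypothetical expansion of one call (error handling elided; names are placeholders):

```go
// createNodes(cs, "worker", st.MakeNode().Label("zone", "z1"), 2)
// is roughly equivalent to:
w := st.MakeNode().Label("zone", "z1")
node0, _ := createNode(cs, w.Name("worker-0").Obj())
node1, _ := createNode(cs, w.Name("worker-1").Obj())
```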