From 2e1042f9593b8ae0a60674ac05eb4b533ff2c5ed Mon Sep 17 00:00:00 2001
From: Andrea Nodari
Date: Wed, 24 Jun 2020 17:33:41 +0200
Subject: [PATCH] Use NodeWrapper to directly initialize node with labels

Using NodeWrapper in the integration tests gives more flexibility when
creating nodes. For instance, tests can create nodes with labels or with
a specific set of resources.

Also, NodeWrapper initializes a node with a capacity of 32 pods, which
can be overridden by the caller. This makes sure that a node is usable
as soon as it is created.
---
 pkg/scheduler/testing/BUILD                   |   1 +
 pkg/scheduler/testing/wrappers.go             |  31 ++++-
 test/integration/scheduler/BUILD              |   2 +-
 test/integration/scheduler/framework_test.go  |  17 +--
 test/integration/scheduler/predicates_test.go |  45 ++------
 test/integration/scheduler/preemption_test.go | 107 ++++++++----------
 test/integration/scheduler/priorities_test.go |  44 +++----
 test/integration/scheduler/scheduler_test.go  |  23 ++--
 test/integration/scheduler/util.go            |  37 +-----
 9 files changed, 132 insertions(+), 175 deletions(-)

diff --git a/pkg/scheduler/testing/BUILD b/pkg/scheduler/testing/BUILD
index 721f61eb17d..0d6cc2d49e1 100644
--- a/pkg/scheduler/testing/BUILD
+++ b/pkg/scheduler/testing/BUILD
@@ -19,6 +19,7 @@ go_library(
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go
index 80ea1448894..ae014c03854 100644
--- a/pkg/scheduler/testing/wrappers.go
+++ b/pkg/scheduler/testing/wrappers.go
@@ -19,7 +19,8 @@ package testing
 import (
 	"fmt"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
@@ -362,7 +363,8 @@ type NodeWrapper struct{ v1.Node }
 
 // MakeNode creates a Node wrapper.
 func MakeNode() *NodeWrapper {
-	return &NodeWrapper{v1.Node{}}
+	w := &NodeWrapper{v1.Node{}}
+	return w.Capacity(nil)
 }
 
 // Obj returns the inner Node.
@@ -390,3 +392,28 @@ func (n *NodeWrapper) Label(k, v string) *NodeWrapper {
 	n.Labels[k] = v
 	return n
 }
+
+// Capacity sets the capacity and the allocatable resources of the inner node.
+// Each entry in `resources` corresponds to a resource name and its quantity.
+// By default, the capacity and allocatable number of pods are set to 32.
+func (n *NodeWrapper) Capacity(resources map[v1.ResourceName]string) *NodeWrapper {
+	res := v1.ResourceList{
+		v1.ResourcePods: resource.MustParse("32"),
+	}
+	for name, value := range resources {
+		res[name] = resource.MustParse(value)
+	}
+	n.Status.Capacity, n.Status.Allocatable = res, res
+	return n
+}
+
+// Images sets the images of the inner node. Each entry in `images` corresponds
+// to an image name and its size in bytes.
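+//
+// A usage sketch for the wrapper methods above (illustrative only; the
+// node name, image name, and size below are made-up values, not taken
+// from this patch's tests):
+//
+//	node := MakeNode().Name("node-1").
+//		Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "500m"}).
+//		Images(map[string]int64{"fake-image:v1": 256 * 1024 * 1024}).
+//		Obj()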
+func (n *NodeWrapper) Images(images map[string]int64) *NodeWrapper {
+	var containerImages []v1.ContainerImage
+	for name, size := range images {
+		containerImages = append(containerImages, v1.ContainerImage{Names: []string{name}, SizeBytes: size})
+	}
+	n.Status.Images = containerImages
+	return n
+}
diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD
index ae90a30f8c2..5f15fa440a5 100644
--- a/test/integration/scheduler/BUILD
+++ b/test/integration/scheduler/BUILD
@@ -86,10 +86,10 @@ go_library(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/controller/disruption:go_default_library",
         "//pkg/scheduler:go_default_library",
+        "//pkg/scheduler/testing:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
diff --git a/test/integration/scheduler/framework_test.go b/test/integration/scheduler/framework_test.go
index 0657a409d2c..8735ff56bbb 100644
--- a/test/integration/scheduler/framework_test.go
+++ b/test/integration/scheduler/framework_test.go
@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testutils "k8s.io/kubernetes/test/integration/util"
 )
@@ -1130,7 +1131,7 @@ func TestBindPlugin(t *testing.T) {
 	defer testutils.CleanupTest(t, testCtx)
 
 	// Add a few nodes.
-	_, err := createNodes(testCtx.ClientSet, "test-node", nil, 2)
+	_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode(), 2)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -1776,12 +1777,12 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
 	defer testutils.CleanupTest(t, testCtx)
 
 	// Add one node.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNodes(testCtx.ClientSet, "test-node", nodeRes, 1)
+	_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode().Capacity(nodeRes), 1)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -1841,7 +1842,7 @@ func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestCont
 	go testCtx.Scheduler.Run(testCtx.Ctx)
 
 	if nodeCount > 0 {
-		_, err := createNodes(testCtx.ClientSet, "test-node", nil, nodeCount)
+		_, err := createNodes(testCtx.ClientSet, "test-node", st.MakeNode(), nodeCount)
 		if err != nil {
 			t.Fatalf("Cannot create nodes: %v", err)
 		}
diff --git a/test/integration/scheduler/predicates_test.go b/test/integration/scheduler/predicates_test.go
index 1c98f5e8601..f700be4754d 100644
--- a/test/integration/scheduler/predicates_test.go
+++ b/test/integration/scheduler/predicates_test.go
@@ -24,7 +24,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/kubernetes"
@@ -43,25 +42,12 @@ const pollInterval = 100 * time.Millisecond
 func TestInterPodAffinity(t *testing.T) {
 	testCtx := initTest(t, "inter-pod-affinity")
 	defer testutils.CleanupTest(t, testCtx)
-	// Add a few nodes.
-	nodes, err := createNodes(testCtx.ClientSet, "testnode", nil, 2)
+
+	// Add a few nodes with labels.
+	nodes, err := createNodes(testCtx.ClientSet, "testnode", st.MakeNode().Label("region", "r1").Label("zone", "z11"), 2)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
-	// Add labels to the nodes.
-	labels1 := map[string]string{
-		"region": "r1",
-		"zone":   "z11",
-	}
-	for _, node := range nodes {
-		// TODO(nodo): Use PodWrapper to directly initialize node with labels.
-		if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, labels1); err != nil {
-			t.Fatalf("Cannot add labels to node: %v", err)
-		}
-		if err = waitForNodeLabels(testCtx.ClientSet, node.Name, labels1); err != nil {
-			t.Fatalf("Adding labels to node didn't succeed: %v", err)
-		}
-	}
 
 	cs := testCtx.ClientSet
 	podLabel := map[string]string{"service": "securityscan"}
@@ -886,7 +872,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
 	ns := testCtx.NS.Name
 	defer testutils.CleanupTest(t, testCtx)
 	// Add 4 nodes.
-	nodes, err := createNodes(cs, "node", nil, 4)
+	nodes, err := createNodes(cs, "node", st.MakeNode(), 4)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -896,7 +882,6 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
 			"zone": fmt.Sprintf("zone-%d", i/2),
 			"node": node.Name,
 		}
-		// TODO(nodo): Use PodWrapper to directly initialize node with labels.
 		if err = utils.AddLabelsToNode(cs, node.Name, labels); err != nil {
 			t.Fatalf("Cannot add labels to node: %v", err)
 		}
@@ -1051,7 +1036,7 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 				Name: "pod-1",
 			},
 			update: func(cs kubernetes.Interface, _ string) error {
-				_, err := createNode(cs, "node-added", nil)
+				_, err := createNode(cs, st.MakeNode().Name("node-added").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
@@ -1061,7 +1046,7 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "node gets taint removed",
 			init: func(cs kubernetes.Interface, _ string) error {
-				node, err := createNode(cs, "node-tainted", nil)
+				node, err := createNode(cs, st.MakeNode().Name("node-tainted").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
@@ -1085,10 +1070,8 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "other pod gets deleted",
 			init: func(cs kubernetes.Interface, ns string) error {
-				nodeResources := &v1.ResourceList{
-					v1.ResourcePods: *resource.NewQuantity(1, resource.DecimalSI),
-				}
-				_, err := createNode(cs, "node-scheduler-integration-test", nodeResources)
+				nodeObject := st.MakeNode().Name("node-scheduler-integration-test").Capacity(map[v1.ResourceName]string{v1.ResourcePods: "1"}).Obj()
+				_, err := createNode(cs, nodeObject)
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
@@ -1111,14 +1094,10 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "pod with pod-affinity gets added",
 			init: func(cs kubernetes.Interface, _ string) error {
-				node, err := createNode(cs, "node-1", nil)
+				_, err := createNode(cs, st.MakeNode().Name("node-1").Label("region", "test").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
-				// TODO(nodo): Use PodWrapper to directly initialize node with labels.
-				if err := utils.AddLabelsToNode(cs, node.Name, map[string]string{"region": "test"}); err != nil {
-					return fmt.Errorf("cannot add labels to node: %v", err)
-				}
 				return nil
 			},
 			pod: &pausePodConfig{
@@ -1155,14 +1134,10 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) {
 		{
 			name: "scheduled pod gets updated to match affinity",
 			init: func(cs kubernetes.Interface, ns string) error {
-				node, err := createNode(cs, "node-1", nil)
+				_, err := createNode(cs, st.MakeNode().Name("node-1").Label("region", "test").Obj())
 				if err != nil {
 					return fmt.Errorf("cannot create node: %v", err)
 				}
-				// TODO(nodo): Use PodWrapper to directly initialize node with labels.
-				if err := utils.AddLabelsToNode(cs, node.Name, map[string]string{"region": "test"}); err != nil {
-					return fmt.Errorf("cannot add labels to node: %v", err)
-				}
 				if _, err := createPausePod(cs, initPausePod(&pausePodConfig{Name: "pod-to-be-updated", Namespace: ns})); err != nil {
 					return fmt.Errorf("cannot create pod: %v", err)
 				}
diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go
index 2ec631207de..0bbe214bec6 100644
--- a/test/integration/scheduler/preemption_test.go
+++ b/test/integration/scheduler/preemption_test.go
@@ -47,9 +47,9 @@ import (
 	schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	"k8s.io/kubernetes/plugin/pkg/admission/priority"
 	testutils "k8s.io/kubernetes/test/integration/util"
-	"k8s.io/kubernetes/test/utils"
 )
 
 var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
@@ -379,21 +379,14 @@ func TestPreemption(t *testing.T) {
 	}
 
 	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	node, err := createNode(testCtx.ClientSet, "node1", nodeRes)
-	if err != nil {
-		t.Fatalf("Error creating nodes: %v", err)
-	}
-	nodeLabels := map[string]string{"node": node.Name}
-	if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-		t.Fatalf("Cannot add labels to node: %v", err)
-	}
-	if err = waitForNodeLabels(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-		t.Fatalf("Adding labels to node didn't succeed: %v", err)
+	nodeObject := st.MakeNode().Name("node1").Capacity(nodeRes).Label("node", "node1").Obj()
+	if _, err := createNode(testCtx.ClientSet, nodeObject); err != nil {
+		t.Fatalf("Error creating node: %v", err)
 	}
 
 	for _, test := range tests {
@@ -481,13 +474,13 @@ func TestNonPreemption(t *testing.T) {
 		},
 	})
 
-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources.
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -557,13 +550,13 @@ func TestDisablePreemption(t *testing.T) {
 		},
 	}
 
-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources.
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -664,13 +657,13 @@ func TestPodPriorityResolution(t *testing.T) {
 		},
 	}
 
-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources.
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -754,13 +747,13 @@ func TestPreemptionStarvation(t *testing.T) {
 		},
 	}
 
-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources.
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -855,13 +848,13 @@ func TestPreemptionRaces(t *testing.T) {
 		},
 	}
 
-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(100, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(5000, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI),
+	// Create a node with some resources.
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "100",
+		v1.ResourceCPU:    "5000m",
+		v1.ResourceMemory: "5000",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -954,13 +947,13 @@ func TestNominatedNodeCleanUp(t *testing.T) {
 
 	defer cleanupPodsInNamespace(cs, t, testCtx.NS.Name)
 
-	// Create a node with some resources and a label.
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	// Create a node with some resources.
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
-	_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
+	_, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node1").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Error creating nodes: %v", err)
 	}
@@ -1069,15 +1062,15 @@ func TestPDBInPreemption(t *testing.T) {
 			v1.ResourceCPU:    *resource.NewMilliQuantity(100, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
 	}
-	defaultNodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
+	defaultNodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
 
 	type nodeConfig struct {
 		name string
-		res  *v1.ResourceList
+		res  map[v1.ResourceName]string
 	}
 
 	tests := []struct {
@@ -1253,7 +1246,7 @@ func TestPDBInPreemption(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
-				_, err := createNode(cs, nodeConf.name, nodeConf.res)
+				_, err := createNode(cs, st.MakeNode().Name(nodeConf.name).Capacity(nodeConf.res).Obj())
 				if err != nil {
 					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
 				}
diff --git a/test/integration/scheduler/priorities_test.go b/test/integration/scheduler/priorities_test.go
index 3776cefc36a..15606729d89 100644
--- a/test/integration/scheduler/priorities_test.go
+++ b/test/integration/scheduler/priorities_test.go
@@ -40,7 +40,7 @@ func TestNodeAffinity(t *testing.T) {
 	testCtx := initTest(t, "node-affinity")
 	defer testutils.CleanupTest(t, testCtx)
 	// Add a few nodes.
-	nodes, err := createNodes(testCtx.ClientSet, "testnode", nil, 5)
+	nodes, err := createNodes(testCtx.ClientSet, "testnode", st.MakeNode(), 5)
 	if err != nil {
 		t.Fatalf("Cannot create nodes: %v", err)
 	}
@@ -97,23 +97,11 @@ func TestPodAffinity(t *testing.T) {
 	testCtx := initTest(t, "pod-affinity")
 	defer testutils.CleanupTest(t, testCtx)
 	// Add a few nodes.
-	nodesInTopology, err := createNodes(testCtx.ClientSet, "in-topology", nil, 5)
-	if err != nil {
-		t.Fatalf("Cannot create nodes: %v", err)
-	}
 	topologyKey := "node-topologykey"
 	topologyValue := "topologyvalue"
-	nodeLabels := map[string]string{
-		topologyKey: topologyValue,
-	}
-	for _, node := range nodesInTopology {
-		// Add topology key to all the nodes.
-		if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-			t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
-		}
-		if err = waitForNodeLabels(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
-			t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
-		}
+	nodesInTopology, err := createNodes(testCtx.ClientSet, "in-topology", st.MakeNode().Label(topologyKey, topologyValue), 5)
+	if err != nil {
+		t.Fatalf("Cannot create nodes: %v", err)
 	}
 
 	// Add a pod with a label and wait for it to schedule.
labelKey := "service" @@ -127,7 +115,7 @@ func TestPodAffinity(t *testing.T) { t.Fatalf("Error running the attractor pod: %v", err) } // Add a few more nodes without the topology label. - _, err = createNodes(testCtx.ClientSet, "other-node", nil, 5) + _, err = createNodes(testCtx.ClientSet, "other-node", st.MakeNode(), 5) if err != nil { t.Fatalf("Cannot create the second set of nodes: %v", err) } @@ -187,22 +175,20 @@ func TestImageLocality(t *testing.T) { testCtx := initTest(t, "image-locality") defer testutils.CleanupTest(t, testCtx) - // We use a fake large image as the test image used by the pod, which has relatively large image size. - image := v1.ContainerImage{ - Names: []string{ - "fake-large-image:v1", - }, - SizeBytes: 3000 * 1024 * 1024, - } - // Create a node with the large image. - nodeWithLargeImage, err := createNodeWithImages(testCtx.ClientSet, "testnode-large-image", nil, []v1.ContainerImage{image}) + // We use a fake large image as the test image used by the pod, which has + // relatively large image size. + imageName := "fake-large-image:v1" + nodeWithLargeImage, err := createNode( + testCtx.ClientSet, + st.MakeNode().Name("testnode-large-image").Images(map[string]int64{imageName: 3000 * 1024 * 1024}).Obj(), + ) if err != nil { t.Fatalf("cannot create node with a large image: %v", err) } // Add a few nodes. - _, err = createNodes(testCtx.ClientSet, "testnode", nil, 10) + _, err = createNodes(testCtx.ClientSet, "testnode", st.MakeNode(), 10) if err != nil { t.Fatalf("cannot create nodes: %v", err) } @@ -212,7 +198,7 @@ func TestImageLocality(t *testing.T) { pod, err := runPodWithContainers(testCtx.ClientSet, initPodWithContainers(testCtx.ClientSet, &podWithContainersConfig{ Name: podName, Namespace: testCtx.NS.Name, - Containers: makeContainersWithImages(image.Names), + Containers: makeContainersWithImages([]string{imageName}), })) if err != nil { t.Fatalf("error running pod with images: %v", err) @@ -249,7 +235,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) { ns := testCtx.NS.Name defer testutils.CleanupTest(t, testCtx) // Add 4 nodes. - nodes, err := createNodes(cs, "node", nil, 4) + nodes, err := createNodes(cs, "node", st.MakeNode(), 4) if err != nil { t.Fatalf("Cannot create nodes: %v", err) } diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index 4b0464514f1..d29027461bf 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -40,6 +40,7 @@ import ( "k8s.io/kubernetes/pkg/scheduler" kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/profile" + st "k8s.io/kubernetes/pkg/scheduler/testing" "k8s.io/kubernetes/test/integration/framework" testutils "k8s.io/kubernetes/test/integration/util" ) @@ -697,12 +698,12 @@ func TestAllocatable(t *testing.T) { defer testutils.CleanupTest(t, testCtx) // 2. 
-	nodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+	nodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "30m",
+		v1.ResourceMemory: "30",
 	}
-	allocNode, err := createNode(testCtx.ClientSet, "node-allocatable-scheduler-test-node", nodeRes)
+	allocNode, err := createNode(testCtx.ClientSet, st.MakeNode().Name("node-allocatable-scheduler-test-node").Capacity(nodeRes).Obj())
 	if err != nil {
 		t.Fatalf("Failed to create node: %v", err)
 	}
@@ -775,15 +776,15 @@ func TestSchedulerInformers(t *testing.T) {
 			v1.ResourceCPU:    *resource.NewMilliQuantity(200, resource.DecimalSI),
 			v1.ResourceMemory: *resource.NewQuantity(200, resource.BinarySI)},
 	}
-	defaultNodeRes := &v1.ResourceList{
-		v1.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
-		v1.ResourceCPU:    *resource.NewMilliQuantity(500, resource.DecimalSI),
-		v1.ResourceMemory: *resource.NewQuantity(500, resource.BinarySI),
+	defaultNodeRes := map[v1.ResourceName]string{
+		v1.ResourcePods:   "32",
+		v1.ResourceCPU:    "500m",
+		v1.ResourceMemory: "500",
 	}
 
 	type nodeConfig struct {
 		name string
-		res  *v1.ResourceList
+		res  map[v1.ResourceName]string
 	}
 
 	tests := []struct {
@@ -826,7 +827,7 @@ func TestSchedulerInformers(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
-				_, err := createNode(cs, nodeConf.name, nodeConf.res)
+				_, err := createNode(cs, st.MakeNode().Name(nodeConf.name).Capacity(nodeConf.res).Obj())
 				if err != nil {
 					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
 				}
diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go
index 343725ee9ba..daa2f73be87 100644
--- a/test/integration/scheduler/util.go
+++ b/test/integration/scheduler/util.go
@@ -25,7 +25,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	policy "k8s.io/api/policy/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -40,6 +39,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/controller/disruption"
 	"k8s.io/kubernetes/pkg/scheduler"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
 	testutils "k8s.io/kubernetes/test/integration/util"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -148,44 +148,17 @@ func waitForNodeLabels(cs clientset.Interface, nodeName string, labels map[strin
 	return wait.Poll(time.Millisecond*100, wait.ForeverTestTimeout, nodeHasLabels(cs, nodeName, labels))
 }
 
-// initNode returns a node with the given resource list and images. If 'res' is nil, a predefined amount of
-// resource will be used.
-func initNode(name string, res *v1.ResourceList, images []v1.ContainerImage) *v1.Node {
-	// if resource is nil, we use a default amount of resources for the node.
-	if res == nil {
-		res = &v1.ResourceList{
-			v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
-		}
-	}
-
-	n := &v1.Node{
-		ObjectMeta: metav1.ObjectMeta{Name: name},
-		Spec:       v1.NodeSpec{Unschedulable: false},
-		Status: v1.NodeStatus{
-			Capacity: *res,
-			Images:   images,
-		},
-	}
-	return n
-}
-
-// createNode creates a node with the given resource list.
-func createNode(cs clientset.Interface, name string, res *v1.ResourceList) (*v1.Node, error) {
-	return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, nil), metav1.CreateOptions{})
-}
-
-// createNodeWithImages creates a node with the given resource list and images.
-func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceList, images []v1.ContainerImage) (*v1.Node, error) {
-	return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, images), metav1.CreateOptions{})
+// createNode creates the given node in the cluster.
+func createNode(cs clientset.Interface, node *v1.Node) (*v1.Node, error) {
+	return cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
 }
 
 // createNodes creates `numNodes` nodes. The created node names will be in the
 // form of "`prefix`-X" where X is an ordinal.
-func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
+func createNodes(cs clientset.Interface, prefix string, wrapper *st.NodeWrapper, numNodes int) ([]*v1.Node, error) {
 	nodes := make([]*v1.Node, numNodes)
 	for i := 0; i < numNodes; i++ {
 		nodeName := fmt.Sprintf("%v-%d", prefix, i)
-		node, err := createNode(cs, nodeName, res)
+		node, err := createNode(cs, wrapper.Name(nodeName).Obj())
 		if err != nil {
 			return nodes[:], err
 		}