From 0202a206b81ccc579feff77ea4593bfb8ed2c6e4 Mon Sep 17 00:00:00 2001
From: Harry Zhang
Date: Thu, 14 Jan 2016 00:29:31 +0800
Subject: [PATCH] Integration test to verify alloc works

Add integration test to verify allocatable works
---
 .../algorithm/predicates/predicates.go |   1 -
 test/integration/scheduler_test.go     | 116 ++++++++++++++++++
 2 files changed, 116 insertions(+), 1 deletion(-)

diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
index 53a22dde0db..9ccb97aaab3 100644
--- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -305,7 +305,6 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	}
 
 	allocatable := info.Status.Allocatable
-
 	if int64(len(existingPods))+1 > allocatable.Pods().Value() {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value())
 		return false, ErrExceededMaxPodNumber
diff --git a/test/integration/scheduler_test.go b/test/integration/scheduler_test.go
index a94b23b28d7..c5039df988b 100644
--- a/test/integration/scheduler_test.go
+++ b/test/integration/scheduler_test.go
@@ -449,3 +449,119 @@ func createPod(name string, annotation map[string]string) *api.Pod {
 		},
 	}
 }
+
+// This test verifies that the scheduler works correctly regardless of whether the kubelet is allocatable-aware.
+func TestAllocatable(t *testing.T) {
+	framework.DeleteAllEtcdKeys()
+
+	var m *master.Master
+	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+		m.Handler.ServeHTTP(w, req)
+	}))
+	defer s.Close()
+
+	masterConfig := framework.NewIntegrationTestMasterConfig()
+	m = master.New(masterConfig)
+
+	// 1. Create and start the default scheduler.
+	restClient := client.NewOrDie(&client.Config{Host: s.URL, GroupVersion: testapi.Default.GroupVersion()})
+
+	schedulerConfigFactory := factory.NewConfigFactory(restClient, api.DefaultSchedulerName)
+	schedulerConfig, err := schedulerConfigFactory.Create()
+	if err != nil {
+		t.Fatalf("Couldn't create scheduler config: %v", err)
+	}
+	eventBroadcaster := record.NewBroadcaster()
+	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: api.DefaultSchedulerName})
+	eventBroadcaster.StartRecordingToSink(restClient.Events(""))
+	scheduler.New(schedulerConfig).Run()
+	// The default scheduler will be stopped later.
+	defer close(schedulerConfig.StopEverything)
+
+	// 2. Create a node without allocatable awareness, i.e. Status.Allocatable is left unset.
+	node := &api.Node{
+		ObjectMeta: api.ObjectMeta{Name: "node-allocatable-scheduler-test-node"},
+		Spec:       api.NodeSpec{Unschedulable: false},
+		Status: api.NodeStatus{
+			Capacity: api.ResourceList{
+				api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+				api.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
+				api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+			},
+		},
+	}
+
+	allocNode, err := restClient.Nodes().Create(node)
+	if err != nil {
+		t.Fatalf("Failed to create node: %v", err)
+	}
+
+	// 3. Create a pod that requests less than Capacity.
+	podResource := &api.Pod{
+		ObjectMeta: api.ObjectMeta{Name: "pod-test-allocatable"},
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Name:  "container",
+					Image: "kubernetes/pause:go",
+					Resources: api.ResourceRequirements{
+						Requests: api.ResourceList{
+							api.ResourceCPU:    *resource.NewMilliQuantity(20, resource.DecimalSI),
+							api.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
+						},
+					},
+				},
+			},
+		},
+	}
+
+	testAllocPod, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
+	if err != nil {
+		t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
+	}
+
+	// 4. Test: this pod should be scheduled, since the apiserver will use Capacity as Allocatable.
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod.Namespace, testAllocPod.Name))
+	if err != nil {
+		t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
+	} else {
+		t.Logf("Test allocatable unawareness: %s Pod scheduled", testAllocPod.Name)
+	}
+
+	// 5. Update the node status to be allocatable-aware; note that Allocatable is now less than the pod's request.
+	allocNode.Status = api.NodeStatus{
+		Capacity: api.ResourceList{
+			api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+			api.ResourceCPU:    *resource.NewMilliQuantity(30, resource.DecimalSI),
+			api.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
+		},
+		Allocatable: api.ResourceList{
+			api.ResourcePods:   *resource.NewQuantity(32, resource.DecimalSI),
+			api.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
+			api.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
+		},
+	}
+
+	if _, err := restClient.Nodes().UpdateStatus(allocNode); err != nil {
+		t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
+	}
+
+	if err := restClient.Pods(api.NamespaceDefault).Delete(podResource.Name, &api.DeleteOptions{}); err != nil {
+		t.Fatalf("Failed to remove first resource pod: %v", err)
+	}
+
+	// 6. Create another pod with a different name but the same resource request.
+	podResource.ObjectMeta.Name = "pod-test-allocatable2"
+	testAllocPod2, err := restClient.Pods(api.NamespaceDefault).Create(podResource)
+	if err != nil {
+		t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
+	}
+
+	// 7. Test: this pod should not be scheduled, since it requests more than Allocatable.
+	err = wait.Poll(time.Second, time.Second*5, podScheduled(restClient, testAllocPod2.Namespace, testAllocPod2.Name))
+	if err == nil {
+		t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly", testAllocPod2.Name)
+	} else {
+		t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
+	}
+}
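
Note for readers of this patch: the podScheduled condition polled in steps 4 and 7 is a helper defined elsewhere in scheduler_test.go and is not part of this diff. As a rough sketch only, assuming it follows the usual wait.ConditionFunc pattern against the same pre-clientset client used above, it would look something like:

// podScheduled returns a wait.ConditionFunc reporting whether the named pod
// has been bound to a node, i.e. whether pod.Spec.NodeName is non-empty.
func podScheduled(c *client.Client, podNamespace, podName string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := c.Pods(podNamespace).Get(podName)
		if err != nil {
			// Assumed behavior: treat lookup errors (e.g. transient connection
			// problems) as "not scheduled yet" and keep polling.
			return false, nil
		}
		return pod.Spec.NodeName != "", nil
	}
}

With such a condition, wait.Poll(time.Second, time.Second*5, ...) returns nil as soon as the pod is bound and a timeout error if it never is, which is exactly what steps 4 and 7 branch on.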