From bdf22e31211ceb80e4cef2ac02849e5ad03197aa Mon Sep 17 00:00:00 2001
From: David Oppenheimer
Date: Tue, 7 Jul 2015 12:44:31 -0700
Subject: [PATCH] Increase zero-limit pod RAM for spreading to 200 MB to match
 cluster addon pods and represent a less trivial fraction of typical machine
 RAM (e.g. n1-standard-1).

---
 .../algorithm/priorities/priorities.go      |  7 +++--
 .../algorithm/priorities/priorities_test.go | 28 ++++++++++++++-----
 2 files changed, 25 insertions(+), 10 deletions(-)

diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities.go b/plugin/pkg/scheduler/algorithm/priorities/priorities.go
index 4a4e333131a..1150ac228e3 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/priorities.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/priorities.go
@@ -46,9 +46,10 @@ func calculateScore(requested int64, capacity int64, node string) int {
 // of computing priority only. This ensures that when scheduling zero-limit pods, such
 // pods will not all be scheduled to the machine with the smallest in-use limit,
 // and that when scheduling regular pods, such pods will not see zero-limit pods as
-// consuming no resources whatsoever.
-const defaultMilliCpuLimit int64 = 100            // 0.1 core
-const defaultMemoryLimit int64 = 60 * 1024 * 1024 // 60 MB
+// consuming no resources whatsoever. We chose these values to be similar to the
+// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
+const defaultMilliCpuLimit int64 = 100             // 0.1 core
+const defaultMemoryLimit int64 = 200 * 1024 * 1024 // 200 MB
 
 // TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
 // as an additional argument here) rather than using constants
diff --git a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
index b6af3446f29..08fcfaf12c2 100644
--- a/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
+++ b/plugin/pkg/scheduler/algorithm/priorities/priorities_test.go
@@ -91,29 +91,37 @@ func TestZeroLimit(t *testing.T) {
 		nodes []api.Node
 		test  string
 	}{
-		// The point of these tests is to show you get the same priority for a zero-limit pod
+		// The point of these next two tests is to show you get the same priority for a zero-limit pod
 		// as for a pod with the defaults limits, both when the zero-limit pod is already on the machine
 		// and when the zero-limit pod is the one being scheduled.
 		{
-			pod: &api.Pod{Spec: noResources},
-			// match current f1-micro on GCE
+			pod:   &api.Pod{Spec: noResources},
 			nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
 			test:  "test priority of zero-limit pod with machine with zero-limit pod",
 			pods: []*api.Pod{
 				{Spec: large1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
 			},
 		},
 		{
-			pod: &api.Pod{Spec: small},
-			// match current f1-micro on GCE
+			pod:   &api.Pod{Spec: small},
 			nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
 			test:  "test priority of nonzero-limit pod with machine with zero-limit pod",
 			pods: []*api.Pod{
 				{Spec: small1}, {Spec: noResources1},
 				{Spec: large2}, {Spec: small2},
 			},
 		},
+		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
+		{
+			pod:   &api.Pod{Spec: large},
+			nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
+			test:  "test priority of larger pod with machine with zero-limit pod",
+			pods: []*api.Pod{
+				{Spec: large1}, {Spec: noResources1},
+				{Spec: large2}, {Spec: small2},
+			},
+		},
 	}
 
 	const expectedPriority int = 25
@@ -130,8 +138,14 @@ func TestZeroLimit(t *testing.T) {
 			t.Errorf("unexpected error: %v", err)
 		}
 		for _, hp := range list {
-			if hp.Score != expectedPriority {
-				t.Errorf("%s: expected 25 for all priorities, got list %#v", list)
+			if test.test == "test priority of larger pod with machine with zero-limit pod" {
+				if hp.Score == expectedPriority {
+					t.Errorf("%s: expected non-%d for all priorities, got list %#v", test.test, expectedPriority, list)
+				}
+			} else {
+				if hp.Score != expectedPriority {
+					t.Errorf("%s: expected %d for all priorities, got list %#v", test.test, expectedPriority, list)
+				}
 			}
 		}
 	}
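Note (not part of the patch): the constants above only affect how the spreading
priority *accounts* for pods that declare no limits; zero values are replaced by
the defaults before scoring. Below is a minimal, self-contained sketch of that
substitution. The names toyContainer and nonzeroLimits are hypothetical, chosen
for illustration; they are not the scheduler's actual types or functions.

package main

import "fmt"

// These mirror the constants the patch sets in priorities.go.
const defaultMilliCpuLimit int64 = 100             // 0.1 core
const defaultMemoryLimit int64 = 200 * 1024 * 1024 // 200 MB

// toyContainer is a hypothetical stand-in for a container's resource limits.
type toyContainer struct {
	milliCPU int64 // 0 means no limit was specified
	memory   int64 // 0 means no limit was specified
}

// nonzeroLimits substitutes the defaults for zero values, mirroring the
// substitution described in the comment block the patch edits.
func nonzeroLimits(c toyContainer) (int64, int64) {
	milliCPU, memory := c.milliCPU, c.memory
	if milliCPU == 0 {
		milliCPU = defaultMilliCpuLimit
	}
	if memory == 0 {
		memory = defaultMemoryLimit
	}
	return milliCPU, memory
}

func main() {
	zeroLimit := toyContainer{} // pod that declared no limits at all
	small := toyContainer{milliCPU: defaultMilliCpuLimit, memory: defaultMemoryLimit}

	zc, zm := nonzeroLimits(zeroLimit)
	sc, sm := nonzeroLimits(small)

	// After substitution a zero-limit pod scores the same as a default-limit
	// pod, which is what the first two TestZeroLimit cases assert; a genuinely
	// larger pod (the third case) still scores differently.
	fmt.Println(zc == sc, zm == sm) // true true
}

Running the sketch prints "true true": for scoring purposes the zero-limit pod
is indistinguishable from a default-limit pod, so spreading treats them alike.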