Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #10857 from davidopp/somebranch

Increase zero-limit pod RAM for spreading to 200 MB to match cluster

Commit e718cf5d51
@@ -46,9 +46,10 @@ func calculateScore(requested int64, capacity int64, node string) int {
 // of computing priority only. This ensures that when scheduling zero-limit pods, such
 // pods will not all be scheduled to the machine with the smallest in-use limit,
 // and that when scheduling regular pods, such pods will not see zero-limit pods as
-// consuming no resources whatsoever.
-const defaultMilliCpuLimit int64 = 100 // 0.1 core
-const defaultMemoryLimit int64 = 60 * 1024 * 1024 // 60 MB
+// consuming no resources whatsoever. We chose these values to be similar to the
+// resources that we give to cluster addon pods (#10653). But they are pretty arbitrary.
+const defaultMilliCpuLimit int64 = 100 // 0.1 core
+const defaultMemoryLimit int64 = 200 * 1024 * 1024 // 200 MB

 // TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
 // as an additional argument here) rather than using constants
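For context, these constants stand in for a container's limits only while priority is being computed, as the comment above describes. The sketch below illustrates that substitution in isolation; the containerLimits type and the nonzeroLimits helper are hypothetical names chosen for the example, not the scheduler's actual API.

package main

import "fmt"

// Defaults used only for computing priority, matching the constants in this commit.
const defaultMilliCpuLimit int64 = 100             // 0.1 core
const defaultMemoryLimit int64 = 200 * 1024 * 1024 // 200 MB

// containerLimits is a simplified, hypothetical stand-in for a container's resource limits.
type containerLimits struct {
	MilliCPU int64
	Memory   int64
}

// nonzeroLimits substitutes the defaults for zero values, so a zero-limit pod is not
// treated as consuming no resources when the spreading priority is computed.
func nonzeroLimits(l containerLimits) (cpu, mem int64) {
	cpu, mem = l.MilliCPU, l.Memory
	if cpu == 0 {
		cpu = defaultMilliCpuLimit
	}
	if mem == 0 {
		mem = defaultMemoryLimit
	}
	return cpu, mem
}

func main() {
	cpu, mem := nonzeroLimits(containerLimits{}) // a container with no limits set
	fmt.Println(cpu, mem)                        // 100 209715200
}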
@@ -91,12 +91,11 @@ func TestZeroLimit(t *testing.T) {
 		nodes []api.Node
 		test  string
 	}{
-		// The point of these tests is to show you get the same priority for a zero-limit pod
+		// The point of these next two tests is to show you get the same priority for a zero-limit pod
 		// as for a pod with the defaults limits, both when the zero-limit pod is already on the machine
 		// and when the zero-limit pod is the one being scheduled.
 		{
-			pod: &api.Pod{Spec: noResources},
-			// match current f1-micro on GCE
+			pod:   &api.Pod{Spec: noResources},
 			nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
 			test:  "test priority of zero-limit pod with machine with zero-limit pod",
 			pods: []*api.Pod{
@@ -105,8 +104,7 @@ func TestZeroLimit(t *testing.T) {
 			},
 		},
 		{
-			pod: &api.Pod{Spec: small},
-			// match current f1-micro on GCE
+			pod:   &api.Pod{Spec: small},
 			nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
 			test:  "test priority of nonzero-limit pod with machine with zero-limit pod",
 			pods: []*api.Pod{
@@ -114,6 +112,16 @@ func TestZeroLimit(t *testing.T) {
 				{Spec: large2}, {Spec: small2},
 			},
 		},
+		// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
+		{
+			pod:   &api.Pod{Spec: large},
+			nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryLimit*10), makeMinion("machine2", 1000, defaultMemoryLimit*10)},
+			test:  "test priority of larger pod with machine with zero-limit pod",
+			pods: []*api.Pod{
+				{Spec: large1}, {Spec: noResources1},
+				{Spec: large2}, {Spec: small2},
+			},
+		},
 	}

 	const expectedPriority int = 25
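To see why the first two cases should produce identical scores while the new third case should not, here is a rough, self-contained illustration of the least-requested arithmetic on a node sized at defaultMemoryLimit*10. The assumption that the nonzero-limit pod requests exactly the default amount and that the larger pod requests several times more is purely illustrative; the actual small/large pod specs are defined elsewhere in the test file, and the test's combined expectedPriority is not reproduced here.

package main

import "fmt"

// leastRequestedScore returns the fraction of capacity left free after the request,
// scaled to the 0..10 range used by scheduler priorities.
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	const defaultMemoryLimit int64 = 200 * 1024 * 1024 // the value introduced by this commit
	capacity := 10 * defaultMemoryLimit                // node memory used in the test cases above

	// A zero-limit pod is scored as if it requested the default, so it produces the
	// same number as a pod that explicitly requests the default amount.
	fmt.Println(leastRequestedScore(defaultMemoryLimit, capacity)) // 9 in both cases

	// A pod that requests noticeably more memory lands on a different score, which is
	// what the new "larger pod" case is meant to detect.
	fmt.Println(leastRequestedScore(5*defaultMemoryLimit, capacity)) // 5
}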
@@ -130,8 +138,14 @@ func TestZeroLimit(t *testing.T) {
 			t.Errorf("unexpected error: %v", err)
 		}
 		for _, hp := range list {
-			if hp.Score != expectedPriority {
-				t.Errorf("%s: expected 25 for all priorities, got list %#v", list)
+			if test.test == "test priority of larger pod with machine with zero-limit pod" {
+				if hp.Score == expectedPriority {
+					t.Errorf("%s: expected non-%d for all priorities, got list %#v", test.test, expectedPriority, list)
+				}
+			} else {
+				if hp.Score != expectedPriority {
+					t.Errorf("%s: expected %d for all priorities, got list %#v", test.test, expectedPriority, list)
+				}
 			}
 		}
 	}
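The new assertion branches on the test's name string to single out the one case whose scores should differ from expectedPriority. The same logic can be expressed with an explicit flag in the test table; the sketch below is purely illustrative, with hypothetical type and field names rather than the structure of the actual test, and the score values are made-up inputs for the example.

package zerolimit

import "testing"

// zeroLimitCase is a hypothetical, trimmed-down version of the test table entries.
type zeroLimitCase struct {
	name          string
	scores        []int // scores produced for each node
	expectDiffers bool  // true for a "larger pod" style case
}

// runZeroLimitCases checks each case's scores against expectedPriority, inverting
// the comparison for cases flagged as expecting a different score.
func runZeroLimitCases(t *testing.T, cases []zeroLimitCase, expectedPriority int) {
	for _, c := range cases {
		for _, s := range c.scores {
			if c.expectDiffers && s == expectedPriority {
				t.Errorf("%s: expected non-%d priority, got %d", c.name, expectedPriority, s)
			}
			if !c.expectDiffers && s != expectedPriority {
				t.Errorf("%s: expected %d priority, got %d", c.name, expectedPriority, s)
			}
		}
	}
}

func TestZeroLimitSketch(t *testing.T) {
	runZeroLimitCases(t, []zeroLimitCase{
		{name: "zero-limit pod", scores: []int{25, 25}, expectDiffers: false}, // illustrative values
		{name: "larger pod", scores: []int{23, 24}, expectDiffers: true},      // illustrative values
	}, 25)
}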