mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-13 22:05:59 +00:00
density test: adjust CPU and memory limits
Adjusted limits based on recent job log: I1028 20:05:42.079182 1002 resource_usage_test.go:199] Resource usage: container cpu(cores) memory_working_set(MB) memory_rss(MB) "kubelet" 0.024 22.17 14.20 "runtime" 0.041 409.70 84.21 I1028 20:05:42.079274 1002 resource_usage_test.go:206] CPU usage of containers: container 50th% 90th% 95th% 99th% 100th% "/" N/A N/A N/A N/A N/A "runtime" 0.014 0.834 0.834 0.834 1.083 "kubelet" 0.023 0.093 0.093 0.093 0.164 Increasing 95th percentile for runtime CPU usage should also make pull-kubernetes-node-kubelet-containerd-flaky less flaky.
This commit is contained in:
parent
5f594f4215
commit
04f7a86001
@ -77,18 +77,24 @@ var _ = SIGDescribe("Density", framework.WithSerial(), framework.WithSlow(), fun
|
|||||||
})
|
})
|
||||||
|
|
||||||
f.Context("create a batch of pods", framework.WithFlaky(), func() {
|
f.Context("create a batch of pods", framework.WithFlaky(), func() {
|
||||||
// TODO(coufon): the values are generous, set more precise limits with benchmark data
|
// If this test case fails with an error similar to
|
||||||
// and add more tests
|
// "container "runtime": expected 95th% usage < 0.900; got 0.941",
|
||||||
|
// it is likely that cpuLimits or memLimits need to be increased.
|
||||||
|
// Actual resource usage values can be found in the test output, e.g.:
|
||||||
|
// I1029 11:28:15.671913 1005 resource_usage_test.go:206] CPU usage of containers:
|
||||||
|
// container 50th% 90th% 95th% 99th% 100th%
|
||||||
|
// "runtime" 0.004 0.941 0.941 0.941 0.976
|
||||||
|
// "kubelet" 0.009 0.082 0.082 0.082 0.101
|
||||||
dTests := []densityTest{
|
dTests := []densityTest{
|
||||||
{
|
{
|
||||||
podsNr: 10,
|
podsNr: 10,
|
||||||
interval: 0 * time.Millisecond,
|
interval: 0 * time.Millisecond,
|
||||||
cpuLimits: e2ekubelet.ContainersCPUSummary{
|
cpuLimits: e2ekubelet.ContainersCPUSummary{
|
||||||
kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.30, 0.95: 0.50},
|
kubeletstatsv1alpha1.SystemContainerKubelet: {0.50: 0.1, 0.95: 0.20},
|
||||||
kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.40, 0.95: 0.60},
|
kubeletstatsv1alpha1.SystemContainerRuntime: {0.50: 0.1, 0.95: 1.5},
|
||||||
},
|
},
|
||||||
memLimits: e2ekubelet.ResourceUsagePerContainer{
|
memLimits: e2ekubelet.ResourceUsagePerContainer{
|
||||||
kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
|
kubeletstatsv1alpha1.SystemContainerKubelet: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 50 * 1024 * 1024},
|
||||||
kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
|
kubeletstatsv1alpha1.SystemContainerRuntime: &e2ekubelet.ContainerResourceUsage{MemoryRSSInBytes: 500 * 1024 * 1024},
|
||||||
},
|
},
|
||||||
// percentile limit of single pod startup latency
|
// percentile limit of single pod startup latency
|
||||||
|
Loading…
Reference in New Issue
Block a user