mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 11:50:44 +00:00
Merge pull request #12462 from gmarek/cadvisor2
Add e2e test monitoring current container resource usage with updated constants.
This commit is contained in:
commit
77f431649b
@ -95,7 +95,9 @@ GCE_DEFAULT_SKIP_TESTS=(
|
||||
|
||||
# The following tests are known to be flaky, and are thus run only in their own
|
||||
# -flaky- build variants.
|
||||
GCE_FLAKY_TESTS=()
|
||||
GCE_FLAKY_TESTS=(
|
||||
"ResourceUsage"
|
||||
)
|
||||
|
||||
# Tests which are not able to be run in parallel.
|
||||
GCE_PARALLEL_SKIP_TESTS=(
|
||||
@ -105,6 +107,7 @@ GCE_PARALLEL_SKIP_TESTS=(
|
||||
"Nodes\sNetwork"
|
||||
"Nodes\sResize"
|
||||
"MaxPods"
|
||||
"ResourceUsage"
|
||||
"SchedulerPredicates"
|
||||
"Services.*restarting"
|
||||
"Shell.*services"
|
||||
|
@ -195,6 +195,10 @@ type containerResourceUsage struct {
|
||||
CPUInterval time.Duration
|
||||
}
|
||||
|
||||
func (r *containerResourceUsage) isStrictlyGreaterThan(rhs *containerResourceUsage) bool {
|
||||
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryUsageInBytes > rhs.MemoryUsageInBytes && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
|
||||
}
|
||||
|
||||
// getOneTimeResourceUsageOnNode queries the node's /stats/container endpoint
|
||||
// and returns the resource usage of targetContainers for the past
|
||||
// cpuInterval.
|
||||
|
129
test/e2e/monitor_resources.go
Normal file
129
test/e2e/monitor_resources.go
Normal file
@ -0,0 +1,129 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/client"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
// datapointAmount is the number of usage samples collected per node before
// the per-container average is computed.
const datapointAmount = 5

// resourceUsagePerContainer maps a container name (e.g. "/kubelet") to its
// measured resource usage.
type resourceUsagePerContainer map[string]*containerResourceUsage

// systemContainers lists the system containers whose resource usage is
// checked by the ResourceUsage e2e test.
var systemContainers = []string{"/docker-daemon", "/kubelet", "/kube-proxy", "/system"}

// allowedUsage defines the maximum average resource consumption permitted
// for each monitored system container. CPU is in cores; memory values are
// in bytes.
// TODO: tweak these values.
var allowedUsage = resourceUsagePerContainer{
	"/docker-daemon": &containerResourceUsage{
		CPUUsageInCores:         0.07,
		MemoryUsageInBytes:      4500000000,
		MemoryWorkingSetInBytes: 1500000000,
	},
	"/kubelet": &containerResourceUsage{
		CPUUsageInCores:         0.1,
		MemoryUsageInBytes:      150000000,
		MemoryWorkingSetInBytes: 150000000,
	},
	"/kube-proxy": &containerResourceUsage{
		CPUUsageInCores:         0.02,
		MemoryUsageInBytes:      12000000,
		MemoryWorkingSetInBytes: 12000000,
	},
	"/system": &containerResourceUsage{
		CPUUsageInCores:         0.01,
		MemoryUsageInBytes:      100000000,
		MemoryWorkingSetInBytes: 75000000,
	},
}
|
||||
|
||||
func computeAverage(sliceOfUsages []resourceUsagePerContainer) (result resourceUsagePerContainer) {
|
||||
result = make(resourceUsagePerContainer)
|
||||
for _, container := range systemContainers {
|
||||
result[container] = &containerResourceUsage{}
|
||||
}
|
||||
for _, usage := range sliceOfUsages {
|
||||
for _, container := range systemContainers {
|
||||
singleResult := &containerResourceUsage{
|
||||
CPUUsageInCores: result[container].CPUUsageInCores + usage[container].CPUUsageInCores,
|
||||
MemoryUsageInBytes: result[container].MemoryUsageInBytes + usage[container].MemoryUsageInBytes,
|
||||
MemoryWorkingSetInBytes: result[container].MemoryWorkingSetInBytes + usage[container].MemoryWorkingSetInBytes,
|
||||
}
|
||||
result[container] = singleResult
|
||||
}
|
||||
}
|
||||
for _, container := range systemContainers {
|
||||
singleResult := &containerResourceUsage{
|
||||
CPUUsageInCores: result[container].CPUUsageInCores / float64(len(sliceOfUsages)),
|
||||
MemoryUsageInBytes: result[container].MemoryUsageInBytes / int64(len(sliceOfUsages)),
|
||||
MemoryWorkingSetInBytes: result[container].MemoryWorkingSetInBytes / int64(len(sliceOfUsages)),
|
||||
}
|
||||
result[container] = singleResult
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// This tests does nothing except checking current resource usage of containers defained in kubelet_stats systemContainers variable.
|
||||
// Test fails if an average container resource consumption over datapointAmount tries exceeds amount defined in allowedUsage.
|
||||
var _ = Describe("ResourceUsage", func() {
|
||||
var c *client.Client
|
||||
BeforeEach(func() {
|
||||
var err error
|
||||
c, err = loadClient()
|
||||
expectNoError(err)
|
||||
})
|
||||
|
||||
It("should not exceed expected amount.", func() {
|
||||
By("Getting ResourceConsumption on all nodes")
|
||||
nodeList, err := c.Nodes().List(labels.Everything(), fields.Everything())
|
||||
expectNoError(err)
|
||||
|
||||
resourceUsagePerNode := make(map[string][]resourceUsagePerContainer)
|
||||
|
||||
for i := 0; i < datapointAmount; i++ {
|
||||
for _, node := range nodeList.Items {
|
||||
resourceUsage, err := getOneTimeResourceUsageOnNode(c, node.Name, 5*time.Second)
|
||||
expectNoError(err)
|
||||
resourceUsagePerNode[node.Name] = append(resourceUsagePerNode[node.Name], resourceUsage)
|
||||
}
|
||||
time.Sleep(3 * time.Second)
|
||||
}
|
||||
|
||||
averageResourceUsagePerNode := make(map[string]resourceUsagePerContainer)
|
||||
for _, node := range nodeList.Items {
|
||||
averageResourceUsagePerNode[node.Name] = computeAverage(resourceUsagePerNode[node.Name])
|
||||
}
|
||||
|
||||
violating := make(map[string]resourceUsagePerContainer)
|
||||
for node, usage := range averageResourceUsagePerNode {
|
||||
for container, cUsage := range usage {
|
||||
Logf("%v on %v usage: %#v", container, node, cUsage)
|
||||
if !allowedUsage[container].isStrictlyGreaterThan(cUsage) {
|
||||
violating[node] = usage
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Expect(violating).To(BeEmpty())
|
||||
})
|
||||
})
|
Loading…
Reference in New Issue
Block a user