Add a validation step to add-on resource monitoring

gmarek 2015-11-30 15:29:40 +01:00
parent ea14d1c309
commit fa4f04e176
3 changed files with 78 additions and 3 deletions

@@ -70,6 +70,39 @@ func extractLatencyMetrics(latencies []podLatencyData) LatencyMetric {
return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99}
}
func density30AddonResourceVerifier() map[string]resourceConstraint {
constraints := make(map[string]resourceConstraint)
constraints["fluentd-elasticsearch"] = resourceConstraint{
cpuConstraint: 0.03,
memoryConstraint: 150 * (1024 * 1024),
}
constraints["elasticsearch-logging"] = resourceConstraint{
cpuConstraint: 2,
memoryConstraint: 750 * (1024 * 1024),
}
constraints["heapster"] = resourceConstraint{
cpuConstraint: 2,
memoryConstraint: 1800 * (1024 * 1024),
}
constraints["kibana-logging"] = resourceConstraint{
cpuConstraint: 0.01,
memoryConstraint: 100 * (1024 * 1024),
}
constraints["kube-proxy"] = resourceConstraint{
cpuConstraint: 0.01,
memoryConstraint: 20 * (1024 * 1024),
}
constraints["l7-lb-controller"] = resourceConstraint{
cpuConstraint: 0.02,
memoryConstraint: 20 * (1024 * 1024),
}
constraints["influxdb"] = resourceConstraint{
cpuConstraint: 2,
memoryConstraint: 300 * (1024 * 1024),
}
return constraints
}
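The units here match the resourceConstraint type introduced later in this diff (see the gatherer changes below): cpuConstraint is in cores, so 0.03 means 3% of one core, and memoryConstraint is in bytes, hence the N * (1024 * 1024) idiom for megabytes. A minimal standalone sanity check of those conversions:

package main

import "fmt"

func main() {
	// cpuConstraint is expressed in cores: 0.03 cores == 3% of one core.
	cpuConstraint := 0.03
	// memoryConstraint is expressed in bytes; 150 * (1024 * 1024) is 150 MB.
	var memoryConstraint int64 = 150 * (1024 * 1024)
	fmt.Printf("fluentd-elasticsearch limits: %.0f%% of a core, %.0f MB\n",
		cpuConstraint*100, float64(memoryConstraint)/(1024*1024))
}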
// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
@@ -177,6 +210,7 @@ var _ = Describe("Density [Skipped]", func() {
name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerNode)
if testArg.podsPerNode == 30 {
name = "[Performance] " + name
framework.addonResourceConstraints = density30AddonResourceVerifier()
}
itArg := testArg
It(name, func() {

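Other suites can reuse the same hook: NewFramework (extended in the framework changes below) initializes addonResourceConstraints to an empty map, and a test that wants enforcement assigns its own limits before the run, exactly as the Density test does above. A self-contained sketch of that pattern, with a stub Framework standing in for the real e2e type:

package main

import "fmt"

type resourceConstraint struct {
	cpuConstraint    float64
	memoryConstraint int64
}

// Stub of the e2e Framework, for illustration only; the real type is
// extended in the next file of this diff.
type Framework struct {
	BaseName                 string
	addonResourceConstraints map[string]resourceConstraint
}

func NewFramework(baseName string) *Framework {
	return &Framework{
		BaseName:                 baseName,
		addonResourceConstraints: make(map[string]resourceConstraint),
	}
}

func main() {
	f := NewFramework("density")
	// Inject suite-specific limits before the test body runs; the
	// afterEach hook passes them on to the gatherer.
	f.addonResourceConstraints["kube-proxy"] = resourceConstraint{
		cpuConstraint:    0.01,
		memoryConstraint: 20 * 1024 * 1024,
	}
	fmt.Printf("%d constraint(s) registered for %q\n",
		len(f.addonResourceConstraints), f.BaseName)
}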
@@ -43,6 +43,10 @@ type Framework struct {
NamespaceDeletionTimeout time.Duration
gatherer containerResourceGatherer
// Constraints that are passed to a check, which is executed after data is
// gathered to see if 99% of results are within acceptable bounds. It has to
// be injected into the test, as expectations vary greatly. Constraints are
// grouped by container names.
addonResourceConstraints map[string]resourceConstraint
logsSizeWaitGroup sync.WaitGroup
logsSizeCloseChannel chan bool
@@ -53,7 +57,8 @@ type Framework struct {
// you (you can write additional before/after each functions).
func NewFramework(baseName string) *Framework {
f := &Framework{
-		BaseName: baseName,
+		BaseName:                 baseName,
+		addonResourceConstraints: make(map[string]resourceConstraint),
}
BeforeEach(f.beforeEach)
@@ -140,7 +145,7 @@ func (f *Framework) afterEach() {
}
if testContext.GatherKubeSystemResourceUsageData {
-		f.gatherer.stopAndPrintData([]int{50, 90, 99, 100})
+		f.gatherer.stopAndPrintData([]int{50, 90, 99, 100}, f.addonResourceConstraints)
}
if testContext.GatherLogsSizes {

@@ -41,6 +41,8 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/sets"
. "github.com/onsi/gomega"
)
// KubeletMetric stores metrics scraped from the kubelet server's /metrics endpoint.
@@ -403,6 +405,11 @@ func computePercentiles(timeSeries map[time.Time]resourceUsagePerContainer, perc
return result
}
type resourceConstraint struct {
cpuConstraint float64
memoryConstraint int64
}
type containerResourceGatherer struct {
usageTimeseries map[time.Time]resourceUsagePerContainer
stopCh chan struct{}
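For orientation: the gatherer accumulates a usage timeseries, and computePercentiles (named in the hunk header above) reduces it to per-container percentile summaries; the 99th-percentile entries are what the new check consumes. An illustrative nearest-rank percentile reduction, not the exact algorithm from this file:

package main

import (
	"fmt"
	"sort"
)

// percentile returns the nearest-rank percentile of a sample set;
// illustrative only, the real computePercentiles works per container
// over the gatherer's timeseries.
func percentile(samples []float64, perc int) float64 {
	sort.Float64s(samples)
	// Smallest value such that perc% of samples are <= it.
	rank := (len(samples)*perc + 99) / 100 // ceil(len*perc/100)
	if rank < 1 {
		rank = 1
	}
	return samples[rank-1]
}

func main() {
	cpuCores := []float64{0.010, 0.012, 0.015, 0.020, 0.090}
	fmt.Printf("p50: %.3f cores, p99: %.3f cores\n",
		percentile(cpuCores, 50), percentile(cpuCores, 99))
}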
@@ -433,7 +440,7 @@ func (g *containerResourceGatherer) startGatheringData(c *client.Client, period
}()
}
-func (g *containerResourceGatherer) stopAndPrintData(percentiles []int) {
+func (g *containerResourceGatherer) stopAndPrintData(percentiles []int, constraints map[string]resourceConstraint) {
close(g.stopCh)
g.timer.Stop()
g.wg.Wait()
@@ -447,6 +454,7 @@ func (g *containerResourceGatherer) stopAndPrintData(percentiles []int) {
sortedKeys = append(sortedKeys, name)
}
sort.Strings(sortedKeys)
violatedConstraints := make([]string, 0)
for _, perc := range percentiles {
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
@@ -454,10 +462,38 @@ func (g *containerResourceGatherer) stopAndPrintData(percentiles []int) {
for _, name := range sortedKeys {
usage := stats[perc][name]
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", name, usage.CPUUsageInCores, float64(usage.MemoryWorkingSetInBytes)/(1024*1024))
// Verify the 99th percentile of resource usage against the injected constraints.
if perc == 99 {
// Name has the form <pod_name>/<container_name>
containerName := strings.Split(name, "/")[1]
if constraint, ok := constraints[containerName]; ok {
if usage.CPUUsageInCores > constraint.cpuConstraint {
violatedConstraints = append(
violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v CPU",
name,
usage.CPUUsageInCores,
constraint.cpuConstraint,
),
)
}
if usage.MemoryWorkingSetInBytes > constraint.memoryConstraint {
violatedConstraints = append(
violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v MB of memory",
name,
float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
float64(constraint.memoryConstraint)/(1024*1024),
),
)
}
}
}
}
w.Flush()
Logf("%v percentile:\n%v", perc, buf.String())
}
Expect(violatedConstraints).To(BeEmpty())
}
// Performs a get on a node proxy endpoint given the nodename and rest client.
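Putting the pieces together: stat keys have the form <pod_name>/<container_name>, so only the container segment is matched against the constraint map, and every violation is collected before the single Expect at the end, so one failure report lists all offending containers. A self-contained sketch of that lookup-and-collect flow (container names and numbers are made up):

package main

import (
	"fmt"
	"strings"
)

type resourceConstraint struct {
	cpuConstraint    float64
	memoryConstraint int64
}

type containerUsage struct {
	CPUUsageInCores         float64
	MemoryWorkingSetInBytes int64
}

func main() {
	constraints := map[string]resourceConstraint{
		"kube-proxy": {cpuConstraint: 0.01, memoryConstraint: 20 * 1024 * 1024},
	}
	// 99th-percentile stats, keyed by <pod_name>/<container_name>.
	stats := map[string]containerUsage{
		"kube-proxy-node-1/kube-proxy": {CPUUsageInCores: 0.02, MemoryWorkingSetInBytes: 30 * 1024 * 1024},
	}
	var violated []string
	for name, usage := range stats {
		constraint, ok := constraints[strings.Split(name, "/")[1]]
		if !ok {
			continue
		}
		if usage.CPUUsageInCores > constraint.cpuConstraint {
			violated = append(violated, fmt.Sprintf("Container %v is using %v/%v CPU",
				name, usage.CPUUsageInCores, constraint.cpuConstraint))
		}
		if usage.MemoryWorkingSetInBytes > constraint.memoryConstraint {
			violated = append(violated, fmt.Sprintf("Container %v is using %v/%v MB of memory",
				name,
				float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
				float64(constraint.memoryConstraint)/(1024*1024)))
		}
	}
	// In the real test, Expect(violatedConstraints).To(BeEmpty()) fails
	// the spec and prints this list whenever any limit is exceeded.
	for _, v := range violated {
		fmt.Println(v)
	}
}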