Merge pull request #14427 from mesosphere/sttts-resource-tests

Avoid assumption of a fixed list of cgroups in e2e conformance tests
Eric Tune 2015-09-23 16:22:53 -07:00
commit 13821de107
5 changed files with 24 additions and 15 deletions

View File

@@ -103,7 +103,7 @@ echo "Conformance test checking conformance with Kubernetes version 1.0"
 # MaxPods\slimit\snumber\sof\spods: not sure why this wasn't working on GCE but it wasn't.
 # Kubectl\sclient\sSimple\spod: not sure why this wasn't working on GCE but it wasn't
 # DNS: not sure why this wasn't working on GCE but it wasn't
-export CONFORMANCE_TEST_SKIP_REGEX="Cadvisor|MasterCerts|Density|Cluster\slevel\slogging|Etcd\sfailure|Load\sCapacity|Monitoring|Namespaces.*seconds|Pod\sdisks|Reboot|Restart|Nodes|Scale|Services.*load\sbalancer|Services.*NodePort|Services.*nodeport|Shell|SSH|Addon\supdate|Volumes|Clean\sup\spods\son\snode|Skipped|skipped|MaxPods\slimit\snumber\sof\spods|Kubectl\sclient\sSimple\spod|DNS"
+export CONFORMANCE_TEST_SKIP_REGEX="Cadvisor|MasterCerts|Density|Cluster\slevel\slogging|Etcd\sfailure|Load\sCapacity|Monitoring|Namespaces.*seconds|Pod\sdisks|Reboot|Restart|Nodes|Scale|Services.*load\sbalancer|Services.*NodePort|Services.*nodeport|Shell|SSH|Addon\supdate|Volumes|Clean\sup\spods\son\snode|Skipped|skipped|MaxPods\slimit\snumber\sof\spods|Kubectl\sclient\sSimple\spod|DNS|Resource\susage\sof\ssystem\scontainers"
 declare -x KUBERNETES_CONFORMANCE_TEST="y"
 declare -x NUM_MINIONS=4
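
For context, a minimal sketch of why the new entry is written with \s escapes and how it ends up skipping the renamed suite. Assumptions here: the skip list is ultimately handed to ginkgo (via its --ginkgo.skip flag) and evaluated as a Go regular expression against full spec names, and \s is used instead of literal spaces so the value can travel as a single shell word; the spec name below is a hypothetical example, not the real "It" text.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The entry added to CONFORMANCE_TEST_SKIP_REGEX above; \s matches the
	// spaces in the actual spec name.
	skip := regexp.MustCompile(`Resource\susage\sof\ssystem\scontainers`)

	// Ginkgo composes full spec names from the Describe/It strings, so the
	// renamed suite produces names along these lines (the "It" part here is
	// made up for illustration).
	name := "Resource usage of system containers stays within expected limits"
	fmt.Println(skip.MatchString(name)) // true -> the spec would be skipped
}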

View File

@@ -119,7 +119,7 @@ GKE_REQUIRED_SKIP_TESTS=(
 GCE_FLAKY_TESTS=(
     "DaemonRestart"
     "Daemon\sset\sshould\slaunch\sa\sdaemon\spod\son\severy\snode\sof\sthe\scluster"
-    "ResourceUsage"
+    "Resource\susage\sof\ssystem\scontainers"
     "monotonically\sincreasing\srestart\scount"
     "should\sbe\sable\sto\schange\sthe\stype\sand\snodeport\ssettings\sof\sa\sservice" # file: service.go, issue: #13032
     "allows\sscheduling\sof\spods\son\sa\sminion\safter\sit\srejoins\sthe\scluster" # file: resize_nodes.go, issue: #13258

@@ -143,7 +143,7 @@ GCE_PARALLEL_SKIP_TESTS=(
     "Nodes\sNetwork"
     "Nodes\sResize"
     "MaxPods"
-    "ResourceUsage"
+    "Resource\susage\sof\ssystem\scontainers"
     "SchedulerPredicates"
     "Services.*restarting"
     "Shell.*services"

View File

@@ -103,7 +103,7 @@ var _ = Describe("kubelet", func() {
 		for _, node := range nodes.Items {
 			nodeNames.Insert(node.Name)
 		}
-		resourceMonitor = newResourceMonitor(framework.Client, targetContainers, containerStatsPollingInterval)
+		resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingInterval)
 		resourceMonitor.Start()
 	})

View File

@@ -178,12 +178,20 @@ const (
 )

 // A list of containers for which we want to collect resource usage.
-var targetContainers = []string{
-	"/",
-	"/docker-daemon",
-	"/kubelet",
-	"/kube-proxy",
-	"/system",
+func targetContainers() []string {
+	if providerIs("gce", "gke") {
+		return []string{
+			"/",
+			"/docker-daemon",
+			"/kubelet",
+			"/kube-proxy",
+			"/system",
+		}
+	} else {
+		return []string{
+			"/",
+		}
+	}
 }

 type containerResourceUsage struct {

@@ -229,8 +237,9 @@ func getOneTimeResourceUsageOnNode(c *client.Client, nodeName string, cpuInterva
 		return nil, err
 	}
 	// Process container infos that are relevant to us.
-	usageMap := make(map[string]*containerResourceUsage, len(targetContainers))
-	for _, name := range targetContainers {
+	containers := targetContainers()
+	usageMap := make(map[string]*containerResourceUsage, len(containers))
+	for _, name := range containers {
 		info, ok := containerInfos[name]
 		if !ok {
 			return nil, fmt.Errorf("missing info for container %q on node %q", name, nodeName)

@@ -491,7 +500,7 @@ func (r *resourceMonitor) LogCPUSummary() {
 	buf := &bytes.Buffer{}
 	w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
 	fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
-	for _, containerName := range targetContainers {
+	for _, containerName := range targetContainers() {
 		data := collector.GetBasicCPUStats(containerName)
 		var s []string
 		s = append(s, fmt.Sprintf("%q", containerName))
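
The one call above that this diff does not define is providerIs. Below is a self-contained sketch, under the assumption that providerIs simply compares the configured provider name against the given list; in the real suite that name comes from the e2e test context rather than a package variable, and testProvider here is purely illustrative.

package main

import (
	"fmt"
	"strings"
)

// testProvider is a hypothetical stand-in for the e2e framework's provider
// setting (normally populated from a --provider flag).
var testProvider = "aws"

// providerIs is assumed to behave like the helper used above: report whether
// the current provider is one of the given names, ignoring case.
func providerIs(providers ...string) bool {
	for _, p := range providers {
		if strings.EqualFold(p, testProvider) {
			return true
		}
	}
	return false
}

// targetContainers mirrors the function added in this PR: only GCE/GKE nodes
// are assumed to expose the /docker-daemon, /kubelet, /kube-proxy and /system
// cgroups, so every other provider is only asked about the root cgroup.
func targetContainers() []string {
	if providerIs("gce", "gke") {
		return []string{"/", "/docker-daemon", "/kubelet", "/kube-proxy", "/system"}
	}
	return []string{"/"}
}

func main() {
	fmt.Println(targetContainers()) // with testProvider = "aws": [/]
}

Turning targetContainers into a function, instead of keeping a package-level slice, is what lets the remaining hunks simply call targetContainers() at each use site without any further plumbing.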

View File

@@ -83,9 +83,9 @@ func computeAverage(sliceOfUsages []resourceUsagePerContainer) (result resourceU
 	return
 }

-// This tests does nothing except checking current resource usage of containers defained in kubelet_stats systemContainers variable.
+// This tests does nothing except checking current resource usage of containers defined in kubelet_stats systemContainers variable.
 // Test fails if an average container resource consumption over datapointAmount tries exceeds amount defined in allowedUsage.
-var _ = Describe("ResourceUsage", func() {
+var _ = Describe("Resource usage of system containers", func() {
 	var c *client.Client
 	BeforeEach(func() {
 		var err error