Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-22 10:06:15 +00:00)
Merge pull request #14427 from mesosphere/sttts-resource-tests
Avoid assumption of a fixed list of cgroups in e2e conformance tests
Commit 13821de107
@@ -103,7 +103,7 @@ echo "Conformance test checking conformance with Kubernetes version 1.0"
 # MaxPods\slimit\snumber\sof\spods: not sure why this wasn't working on GCE but it wasn't.
 # Kubectl\sclient\sSimple\spod: not sure why this wasn't working on GCE but it wasn't
 # DNS: not sure why this wasn't working on GCE but it wasn't
-export CONFORMANCE_TEST_SKIP_REGEX="Cadvisor|MasterCerts|Density|Cluster\slevel\slogging|Etcd\sfailure|Load\sCapacity|Monitoring|Namespaces.*seconds|Pod\sdisks|Reboot|Restart|Nodes|Scale|Services.*load\sbalancer|Services.*NodePort|Services.*nodeport|Shell|SSH|Addon\supdate|Volumes|Clean\sup\spods\son\snode|Skipped|skipped|MaxPods\slimit\snumber\sof\spods|Kubectl\sclient\sSimple\spod|DNS"
+export CONFORMANCE_TEST_SKIP_REGEX="Cadvisor|MasterCerts|Density|Cluster\slevel\slogging|Etcd\sfailure|Load\sCapacity|Monitoring|Namespaces.*seconds|Pod\sdisks|Reboot|Restart|Nodes|Scale|Services.*load\sbalancer|Services.*NodePort|Services.*nodeport|Shell|SSH|Addon\supdate|Volumes|Clean\sup\spods\son\snode|Skipped|skipped|MaxPods\slimit\snumber\sof\spods|Kubectl\sclient\sSimple\spod|DNS|Resource\susage\sof\ssystem\scontainers"
 
 declare -x KUBERNETES_CONFORMANCE_TEST="y"
 declare -x NUM_MINIONS=4
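For context, each entry in CONFORMANCE_TEST_SKIP_REGEX is matched against full Ginkgo test names, which is why spaces in a test's description are written as \s. A minimal sketch of that matching in plain Go (the test-name string below is illustrative, not taken from the suite):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Skip entries are RE2-style patterns; "\s" matches the spaces in a
	// test name such as "Resource usage of system containers ...".
	skip := regexp.MustCompile(`Resource\susage\sof\ssystem\scontainers`)
	fmt.Println(skip.MatchString("Resource usage of system containers should not exceed expected amount"))
	// Output: true
}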
@@ -119,7 +119,7 @@ GKE_REQUIRED_SKIP_TESTS=(
 GCE_FLAKY_TESTS=(
     "DaemonRestart"
     "Daemon\sset\sshould\slaunch\sa\sdaemon\spod\son\severy\snode\sof\sthe\scluster"
-    "ResourceUsage"
+    "Resource\susage\sof\ssystem\scontainers"
     "monotonically\sincreasing\srestart\scount"
     "should\sbe\sable\sto\schange\sthe\stype\sand\snodeport\ssettings\sof\sa\sservice" # file: service.go, issue: #13032
     "allows\sscheduling\sof\spods\son\sa\sminion\safter\sit\srejoins\sthe\scluster" # file: resize_nodes.go, issue: #13258
@@ -143,7 +143,7 @@ GCE_PARALLEL_SKIP_TESTS=(
     "Nodes\sNetwork"
     "Nodes\sResize"
     "MaxPods"
-    "ResourceUsage"
+    "Resource\susage\sof\ssystem\scontainers"
     "SchedulerPredicates"
     "Services.*restarting"
     "Shell.*services"
@@ -103,7 +103,7 @@ var _ = Describe("kubelet", func() {
 		for _, node := range nodes.Items {
 			nodeNames.Insert(node.Name)
 		}
-		resourceMonitor = newResourceMonitor(framework.Client, targetContainers, containerStatsPollingInterval)
+		resourceMonitor = newResourceMonitor(framework.Client, targetContainers(), containerStatsPollingInterval)
 		resourceMonitor.Start()
 	})
 
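The monitor started above polls container stats at containerStatsPollingInterval until the test tears it down. A self-contained sketch of that start/stop shape, assuming a ticker-driven goroutine; the type, its fields, and the println placeholder are illustrative, not the e2e implementation, which fetches cAdvisor stats for each node:

package main

import (
	"fmt"
	"time"
)

// resourceMonitor sketch: polls a stats source at a fixed interval until stopped.
type resourceMonitor struct {
	interval time.Duration
	stop     chan struct{}
}

func newResourceMonitor(interval time.Duration) *resourceMonitor {
	return &resourceMonitor{interval: interval, stop: make(chan struct{})}
}

func (r *resourceMonitor) Start() {
	go func() {
		ticker := time.NewTicker(r.interval)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				fmt.Println("poll container stats") // placeholder for a real stats fetch
			case <-r.stop:
				return
			}
		}
	}()
}

func (r *resourceMonitor) Stop() { close(r.stop) }

func main() {
	m := newResourceMonitor(100 * time.Millisecond)
	m.Start()
	time.Sleep(350 * time.Millisecond)
	m.Stop()
}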
@@ -178,12 +178,20 @@ const (
 )
 
 // A list of containers for which we want to collect resource usage.
-var targetContainers = []string{
-	"/",
-	"/docker-daemon",
-	"/kubelet",
-	"/kube-proxy",
-	"/system",
-}
+func targetContainers() []string {
+	if providerIs("gce", "gke") {
+		return []string{
+			"/",
+			"/docker-daemon",
+			"/kubelet",
+			"/kube-proxy",
+			"/system",
+		}
+	} else {
+		return []string{
+			"/",
+		}
+	}
+}
 
 type containerResourceUsage struct {
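Replacing the package-level var with a function defers the provider check to call time, after the e2e flags have been parsed; only GCE/GKE nodes are assumed to expose the full fixed set of system cgroups. A minimal sketch of a providerIs-style helper, assuming a testContext-like global holding the parsed provider name (the names here are stand-ins, not the real e2e util API):

package main

import "fmt"

// testContext stands in for the e2e framework's parsed flags; illustrative only.
var testContext = struct{ Provider string }{Provider: "gce"}

// providerIs reports whether the current provider is one of the given names.
func providerIs(providers ...string) bool {
	for _, p := range providers {
		if p == testContext.Provider {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(providerIs("gce", "gke")) // true while Provider == "gce"
}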
@@ -229,8 +237,9 @@ func getOneTimeResourceUsageOnNode(c *client.Client, nodeName string, cpuInterva
 		return nil, err
 	}
 	// Process container infos that are relevant to us.
-	usageMap := make(map[string]*containerResourceUsage, len(targetContainers))
-	for _, name := range targetContainers {
+	containers := targetContainers()
+	usageMap := make(map[string]*containerResourceUsage, len(containers))
+	for _, name := range containers {
 		info, ok := containerInfos[name]
 		if !ok {
 			return nil, fmt.Errorf("missing info for container %q on node %q", name, nodeName)
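Note the call-once pattern in the hunk above: targetContainers() is evaluated a single time and the slice is reused, so the map's size hint and the loop range stay in sync and the provider check is not re-run per iteration. A trivial self-contained sketch of the same pattern (stand-in types, not the e2e code):

package main

import "fmt"

func targetContainers() []string { // stand-in for the function above
	return []string{"/", "/kubelet"}
}

func main() {
	// Call once and reuse: the slice is not rebuilt on every iteration.
	containers := targetContainers()
	usageMap := make(map[string]int, len(containers))
	for i, name := range containers {
		usageMap[name] = i // placeholder for real per-container stats
	}
	fmt.Println(usageMap)
}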
@@ -491,7 +500,7 @@ func (r *resourceMonitor) LogCPUSummary() {
 	buf := &bytes.Buffer{}
 	w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
 	fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
-	for _, containerName := range targetContainers {
+	for _, containerName := range targetContainers() {
 		data := collector.GetBasicCPUStats(containerName)
 		var s []string
 		s = append(s, fmt.Sprintf("%q", containerName))
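The summary table relies on text/tabwriter to align columns when the buffer is flushed. A runnable sketch of the same pattern, using the exact NewWriter parameters shown above; the header fields and the sample row are illustrative:

package main

import (
	"bytes"
	"fmt"
	"os"
	"strings"
	"text/tabwriter"
)

func main() {
	// Tab-separated rows are aligned into columns on Flush.
	header := []string{"container", "50th%", "90th%", "99th%"}
	buf := &bytes.Buffer{}
	w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
	fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
	fmt.Fprintf(w, "%q\t%.3f\t%.3f\t%.3f\n", "/kubelet", 0.021, 0.042, 0.085) // sample data
	w.Flush()
	os.Stdout.Write(buf.Bytes())
}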
@@ -83,9 +83,9 @@ func computeAverage(sliceOfUsages []resourceUsagePerContainer) (result resourceU
 	return
 }
 
-// This tests does nothing except checking current resource usage of containers defained in kubelet_stats systemContainers variable.
+// This tests does nothing except checking current resource usage of containers defined in kubelet_stats systemContainers variable.
 // Test fails if an average container resource consumption over datapointAmount tries exceeds amount defined in allowedUsage.
-var _ = Describe("ResourceUsage", func() {
+var _ = Describe("Resource usage of system containers", func() {
 	var c *client.Client
 	BeforeEach(func() {
 		var err error