Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-26 05:03:09 +00:00)
Reduce indents of resource_usage_gatherer
test/e2e/framework/resource_usage_gatherer contained deeply nested code; the indentation can be reduced with `continue` for readability.
This commit is contained in:
parent cc46849212
commit de66a736e8
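Both hunks below apply the same guard-clause pattern: invert a condition and bail out early with `continue` (or `return`) so the main work stays at the outer indentation level. A minimal, runnable sketch of the pattern, using made-up names (`wanted`, `process`) rather than code from this file:

package main

import "fmt"

// wanted and process stand in for an arbitrary filter and an arbitrary
// piece of work; the names are invented for this sketch.
func wanted(n int) bool { return n%2 == 0 }
func process(n int)     { fmt.Println(n) }

// nested keeps the work inside the condition, adding one indent level.
func nested(items []int) {
	for _, item := range items {
		if wanted(item) {
			process(item)
		}
	}
}

// flattened inverts the condition and continues early, so the work stays
// at the outer indentation level -- the shape this commit applies.
func flattened(items []int) {
	for _, item := range items {
		if !wanted(item) {
			continue
		}
		process(item)
	}
}

func main() {
	nested([]int{1, 2, 3, 4})
	flattened([]int{1, 2, 3, 4})
}

The filtering logic is unchanged in both hunks; only the shape of the control flow differs.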
@@ -266,58 +266,59 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
             probeDuration: options.ProbeDuration,
             printVerboseLogs: options.PrintVerboseLogs,
         })
-    } else {
-        // Tracks kube-system pods if no valid PodList is passed in.
-        var err error
-        if pods == nil {
-            pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
-            if err != nil {
-                e2elog.Logf("Error while listing Pods: %v", err)
-                return nil, err
-            }
-        }
-        dnsNodes := make(map[string]bool)
-        for _, pod := range pods.Items {
-            if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
-                continue
-            }
-            if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
-                continue
-            }
-            for _, container := range pod.Status.InitContainerStatuses {
-                g.containerIDs = append(g.containerIDs, container.Name)
-            }
-            for _, container := range pod.Status.ContainerStatuses {
-                g.containerIDs = append(g.containerIDs, container.Name)
-            }
-            if options.Nodes == MasterAndDNSNodes {
-                dnsNodes[pod.Spec.NodeName] = true
-            }
-        }
-        nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-        if err != nil {
-            e2elog.Logf("Error while listing Nodes: %v", err)
-            return nil, err
-        }
+        return &g, nil
+    }
+
+    // Tracks kube-system pods if no valid PodList is passed in.
+    var err error
+    if pods == nil {
+        pods, err = c.CoreV1().Pods("kube-system").List(metav1.ListOptions{})
+        if err != nil {
+            e2elog.Logf("Error while listing Pods: %v", err)
+            return nil, err
+        }
+    }
+    dnsNodes := make(map[string]bool)
+    for _, pod := range pods.Items {
+        if (options.Nodes == MasterNodes) && !system.IsMasterNode(pod.Spec.NodeName) {
+            continue
+        }
+        if (options.Nodes == MasterAndDNSNodes) && !system.IsMasterNode(pod.Spec.NodeName) && pod.Labels["k8s-app"] != "kube-dns" {
+            continue
+        }
+        for _, container := range pod.Status.InitContainerStatuses {
+            g.containerIDs = append(g.containerIDs, container.Name)
+        }
+        for _, container := range pod.Status.ContainerStatuses {
+            g.containerIDs = append(g.containerIDs, container.Name)
+        }
+        if options.Nodes == MasterAndDNSNodes {
+            dnsNodes[pod.Spec.NodeName] = true
+        }
+    }
+    nodeList, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
+    if err != nil {
+        e2elog.Logf("Error while listing Nodes: %v", err)
+        return nil, err
+    }
 
-        for _, node := range nodeList.Items {
-            if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
-                g.workerWg.Add(1)
-                g.workers = append(g.workers, resourceGatherWorker{
-                    c: c,
-                    nodeName: node.Name,
-                    wg: &g.workerWg,
-                    containerIDs: g.containerIDs,
-                    stopCh: g.stopCh,
-                    finished: false,
-                    inKubemark: false,
-                    resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
-                    probeDuration: options.ProbeDuration,
-                    printVerboseLogs: options.PrintVerboseLogs,
-                })
-                if options.Nodes == MasterNodes {
-                    break
-                }
-            }
-        }
-    }
+    for _, node := range nodeList.Items {
+        if options.Nodes == AllNodes || system.IsMasterNode(node.Name) || dnsNodes[node.Name] {
+            g.workerWg.Add(1)
+            g.workers = append(g.workers, resourceGatherWorker{
+                c: c,
+                nodeName: node.Name,
+                wg: &g.workerWg,
+                containerIDs: g.containerIDs,
+                stopCh: g.stopCh,
+                finished: false,
+                inKubemark: false,
+                resourceDataGatheringPeriod: options.ResourceDataGatheringPeriod,
+                probeDuration: options.ProbeDuration,
+                printVerboseLogs: options.PrintVerboseLogs,
+            })
+            if options.Nodes == MasterNodes {
+                break
+            }
+        }
+    }
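The hunk above uses the `return` flavor of the guard: the `options.InKubemark` branch now ends with `return &g, nil`, which lets the rest of the constructor drop the surrounding `else` and move one indentation level to the left. A small sketch of that shape, with hypothetical names (`Config`, `Gatherer`, `buildKubemark`, `buildDefault`) standing in for the real framework types:

package main

// Config and Gatherer are placeholder types for this sketch, not the
// real e2e framework types.
type Config struct{ InKubemark bool }
type Gatherer struct{ mode string }

func buildKubemark(g *Gatherer) { g.mode = "kubemark" }
func buildDefault(g *Gatherer)  { g.mode = "default" }

// NewGatherer returns as soon as the special case is fully handled, so
// the default path is not nested inside an else block.
func NewGatherer(cfg Config) (*Gatherer, error) {
	g := &Gatherer{}
	if cfg.InKubemark {
		buildKubemark(g)
		return g, nil
	}
	buildDefault(g)
	return g, nil
}

func main() {
	_, _ = NewGatherer(Config{InKubemark: true})
}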
@@ -392,32 +393,36 @@ func (g *ContainerResourceGatherer) StopAndSummarize(percentiles []int, constrai
                 CPU: usage.CPUUsageInCores,
                 Mem: usage.MemoryWorkingSetInBytes,
             })
+
             // Verifying 99th percentile of resource usage
-            if perc == 99 {
-                // Name has a form: <pod_name>/<container_name>
-                containerName := strings.Split(name, "/")[1]
-                if constraint, ok := constraints[containerName]; ok {
-                    if usage.CPUUsageInCores > constraint.CPUConstraint {
-                        violatedConstraints = append(
-                            violatedConstraints,
-                            fmt.Sprintf("Container %v is using %v/%v CPU",
-                                name,
-                                usage.CPUUsageInCores,
-                                constraint.CPUConstraint,
-                            ),
-                        )
-                    }
-                    if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint {
-                        violatedConstraints = append(
-                            violatedConstraints,
-                            fmt.Sprintf("Container %v is using %v/%v MB of memory",
-                                name,
-                                float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
-                                float64(constraint.MemoryConstraint)/(1024*1024),
-                            ),
-                        )
-                    }
-                }
+            if perc != 99 {
+                continue
+            }
+            // Name has a form: <pod_name>/<container_name>
+            containerName := strings.Split(name, "/")[1]
+            constraint, ok := constraints[containerName]
+            if !ok {
+                continue
+            }
+            if usage.CPUUsageInCores > constraint.CPUConstraint {
+                violatedConstraints = append(
+                    violatedConstraints,
+                    fmt.Sprintf("Container %v is using %v/%v CPU",
+                        name,
+                        usage.CPUUsageInCores,
+                        constraint.CPUConstraint,
+                    ),
+                )
+            }
+            if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint {
+                violatedConstraints = append(
+                    violatedConstraints,
+                    fmt.Sprintf("Container %v is using %v/%v MB of memory",
+                        name,
+                        float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
+                        float64(constraint.MemoryConstraint)/(1024*1024),
+                    ),
+                )
             }
         }
     }
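Besides flattening the nesting, the second hunk rewrites the map lookup: `if constraint, ok := constraints[containerName]; ok { … }` becomes a plain assignment followed by `if !ok { continue }`. The behavior is the same; the difference is that `constraint` and `ok` are now scoped to the loop body rather than to the `if` statement. A minimal sketch of the equivalence, with a hypothetical `limits` map in place of the real constraints:

package main

import "fmt"

func main() {
	// limits is a made-up stand-in for the constraints map in the real code.
	limits := map[string]int{"etcd": 100}
	containers := []string{"etcd", "kube-apiserver"}

	// Before: the comma-ok form scopes limit and ok to the if statement.
	for _, name := range containers {
		if limit, ok := limits[name]; ok {
			fmt.Println(name, "limit:", limit)
		}
	}

	// After: look up first, then guard with continue; limit and ok remain
	// visible for the rest of the loop body, and the happy path is not indented.
	for _, name := range containers {
		limit, ok := limits[name]
		if !ok {
			continue
		}
		fmt.Println(name, "limit:", limit)
	}
}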