Merge pull request #75623 from oomichi/golint-e2e-framework-k-l
Fix golint failures of e2e/framework/[k-l]*.go
commit 63ffa3cce6
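For context, the golint findings fixed here fall into two families: a comment on an exported identifier must start with that identifier's name, and exported functions, methods, and types need a doc comment (or should be unexported). A minimal sketch of the convention, using illustrative names that are not part of this patch:

package example

// Bad (flagged by golint): a comment like "// A thing that spins." on an exported type,
// or an exported function with no comment at all.

// Widget is a deliberately trivial example type; the comment starts with its name.
type Widget struct{}

// NewWidget returns a new Widget and carries the doc comment golint expects.
func NewWidget() *Widget {
	return &Widget{}
}

The hunks below apply exactly this pattern to the e2e framework code.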
@@ -43,7 +43,7 @@ import (
 	"github.com/prometheus/common/model"
 )
 
-// KubeletMetric stores metrics scraped from the kubelet server's /metric endpoint.
+// KubeletLatencyMetric stores metrics scraped from the kubelet server's /metric endpoint.
 // TODO: Get some more structure around the metrics and this type
 type KubeletLatencyMetric struct {
 	// eg: list, info, create
@@ -55,7 +55,7 @@ type KubeletLatencyMetric struct {
 	Latency time.Duration
 }
 
-// KubeletMetricByLatency implements sort.Interface for []KubeletMetric based on
+// KubeletLatencyMetrics implements sort.Interface for []KubeletMetric based on
 // the latency field.
 type KubeletLatencyMetrics []KubeletLatencyMetric
 
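The sort.Interface methods themselves sit outside this hunk; as a hedged sketch, satisfying the interface for this type would typically look like the following (the descending-by-latency ordering is an assumption, not shown in the diff):

func (a KubeletLatencyMetrics) Len() int      { return len(a) }
func (a KubeletLatencyMetrics) Swap(i, j int) { a[i], a[j] = a[j], a[i] }

// Less orders by the Latency field (largest first is assumed here).
func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].Latency }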
@@ -159,6 +159,7 @@ type RuntimeOperationErrorRate struct {
 	TimeoutRate float64
 }
 
+// NewRuntimeOperationMonitor returns a new RuntimeOperationMonitor.
 func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
 	m := &RuntimeOperationMonitor{
 		client: c,
@@ -433,7 +434,7 @@ const (
 	rootContainerName = "/"
 )
 
-// A list of containers for which we want to collect resource usage.
+// TargetContainers returns a list of containers for which we want to collect resource usage.
 func TargetContainers() []string {
 	return []string{
 		rootContainerName,
@@ -442,6 +443,7 @@ func TargetContainers() []string {
 	}
 }
 
+// ContainerResourceUsage is a structure for gathering container resource usage.
 type ContainerResourceUsage struct {
 	Name string
 	Timestamp time.Time
@@ -457,7 +459,10 @@ func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsa
 	return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
 }
 
+// ResourceUsagePerContainer is map of ContainerResourceUsage
 type ResourceUsagePerContainer map[string]*ContainerResourceUsage
+
+// ResourceUsagePerNode is map of ResourceUsagePerContainer.
 type ResourceUsagePerNode map[string]ResourceUsagePerContainer
 
 func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
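A purely illustrative construction of these nested map types, assuming code inside the same framework package (node name -> container name -> usage sample; the values are made up for the example):

usage := ResourceUsagePerNode{
	"node-1": ResourceUsagePerContainer{
		"/kubelet": &ContainerResourceUsage{
			Name:            "/kubelet",
			CPUUsageInCores: 0.12, // example value only
		},
	},
}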
@@ -491,6 +496,7 @@ type usageDataPerContainer struct {
 	memWorkSetData []uint64
 }
 
+// GetKubeletHeapStats returns stats of kubelet heap.
 func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
 	client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap", ports.KubeletPort)
 	if err != nil {
@@ -507,6 +513,7 @@ func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error)
 	return strings.Join(lines[len(lines)-numLines:], "\n"), nil
 }
 
+// PrintAllKubeletPods outputs status of all kubelet pods into log.
 func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
 	podList, err := GetKubeletPods(c, nodeName)
 	if err != nil {
@@ -661,6 +668,7 @@ type ResourceMonitor struct {
 	collectors map[string]*resourceCollector
 }
 
+// NewResourceMonitor returns a new ResourceMonitor.
 func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
 	return &ResourceMonitor{
 		containers: containerNames,
@@ -669,6 +677,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI
 	}
 }
 
+// Start starts collectors.
 func (r *ResourceMonitor) Start() {
 	// It should be OK to monitor unschedulable Nodes
 	nodes, err := r.client.CoreV1().Nodes().List(metav1.ListOptions{})
@@ -683,18 +692,21 @@ func (r *ResourceMonitor) Start() {
 	}
 }
 
+// Stop stops collectors.
 func (r *ResourceMonitor) Stop() {
 	for _, collector := range r.collectors {
 		collector.Stop()
 	}
 }
 
+// Reset resets collectors.
 func (r *ResourceMonitor) Reset() {
 	for _, collector := range r.collectors {
 		collector.Reset()
 	}
 }
 
+// LogLatest outputs the latest resource usage into log.
 func (r *ResourceMonitor) LogLatest() {
 	summary, err := r.GetLatest()
 	if err != nil {
@@ -703,6 +715,8 @@ func (r *ResourceMonitor) LogLatest() {
 	Logf("%s", r.FormatResourceUsage(summary))
 }
 
+// FormatResourceUsage returns the formatted string for LogLatest().
+// TODO(oomichi): This can be made to local function after making test/e2e/node/kubelet_perf.go use LogLatest directly instead.
 func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
 	summary := []string{}
 	for node, usage := range s {
@@ -711,6 +725,7 @@ func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
 	return strings.Join(summary, "\n")
 }
 
+// GetLatest returns the latest resource usage.
 func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
 	result := make(ResourceUsagePerNode)
 	errs := []error{}
@@ -725,6 +740,7 @@ func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
 	return result, utilerrors.NewAggregate(errs)
 }
 
+// GetMasterNodeLatest returns the latest resource usage of master and node.
 func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode) ResourceUsagePerNode {
 	result := make(ResourceUsagePerNode)
 	var masterUsage ResourceUsagePerContainer
@@ -767,6 +783,7 @@ type ContainersCPUSummary map[string]map[float64]float64
 // ContainersCPUSummary map.
 type NodesCPUSummary map[string]ContainersCPUSummary
 
+// FormatCPUSummary returns the string of human-readable CPU summary from the specified summary data.
 func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
 	// Example output for a node (the percentiles may differ):
 	// CPU usage of containers on node "e2e-test-foo-node-0vj7":
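As an aside, NodesCPUSummary nests ContainersCPUSummary, which maps a container name to percentile -> CPU usage in cores. A hedged sketch of the shape the formatter consumes, with invented numbers:

summary := NodesCPUSummary{
	"e2e-test-foo-node-0vj7": ContainersCPUSummary{
		"/kubelet": {0.50: 0.12, 0.95: 0.34}, // percentile -> cores; values invented
	},
}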
@@ -804,11 +821,13 @@ func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
 	return strings.Join(summaryStrings, "\n")
 }
 
+// LogCPUSummary outputs summary of CPU into log.
 func (r *ResourceMonitor) LogCPUSummary() {
 	summary := r.GetCPUSummary()
 	Logf("%s", r.FormatCPUSummary(summary))
 }
 
+// GetCPUSummary returns summary of CPU.
 func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
 	result := make(NodesCPUSummary)
 	for nodeName, collector := range r.collectors {
@@ -821,6 +840,7 @@ func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
 	return result
 }
 
+// GetMasterNodeCPUSummary returns summary of master node CPUs.
 func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary) NodesCPUSummary {
 	result := make(NodesCPUSummary)
 	var masterSummary ContainersCPUSummary
@@ -75,16 +75,20 @@ type LogsSizeVerifier struct {
 	workers []*LogSizeGatherer
 }
 
+// SingleLogSummary is a structure for handling average generation rate and number of probes.
 type SingleLogSummary struct {
 	AverageGenerationRate int
 	NumberOfProbes        int
 }
 
+// LogSizeDataTimeseries is map of timestamped size.
 type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
 
+// LogsSizeDataSummary is map of log summary.
 // node -> file -> data
 type LogsSizeDataSummary map[string]map[string]SingleLogSummary
 
+// PrintHumanReadable returns string of log size data summary.
 // TODO: make sure that we don't need locking here
 func (s *LogsSizeDataSummary) PrintHumanReadable() string {
 	buf := &bytes.Buffer{}
@@ -100,14 +104,17 @@ func (s *LogsSizeDataSummary) PrintHumanReadable() string {
 	return buf.String()
 }
 
+// PrintJSON returns the summary of log size data with JSON format.
 func (s *LogsSizeDataSummary) PrintJSON() string {
 	return PrettyPrintJSON(*s)
 }
 
+// SummaryKind returns the summary of log size data summary.
 func (s *LogsSizeDataSummary) SummaryKind() string {
 	return "LogSizeSummary"
 }
 
+// LogsSizeData is a structure for handling timeseries of log size data and lock.
 type LogsSizeData struct {
 	data LogSizeDataTimeseries
 	lock sync.Mutex
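The trio of SummaryKind, PrintHumanReadable, and PrintJSON matches the shape of the framework's summary interface; the exact interface name used here is an assumption, but it would look roughly like this:

// Assumed shape of the summary interface these methods satisfy.
type TestDataSummary interface {
	SummaryKind() string
	PrintHumanReadable() string
	PrintJSON() string
}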
@@ -133,7 +140,7 @@ func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
 	}
 }
 
-func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int) {
+func (d *LogsSizeData) addNewData(ip, path string, timestamp time.Time, size int) {
 	d.lock.Lock()
 	defer d.lock.Unlock()
 	d.data[ip][path] = append(
@@ -197,26 +204,27 @@ func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
 }
 
 // Run starts log size gathering. It starts a gorouting for every worker and then blocks until stopChannel is closed
-func (v *LogsSizeVerifier) Run() {
-	v.workChannel <- WorkItem{
-		ip:                v.masterAddress,
+func (s *LogsSizeVerifier) Run() {
+	s.workChannel <- WorkItem{
+		ip:                s.masterAddress,
 		paths:             masterLogsToCheck,
 		backoffMultiplier: 1,
 	}
-	for _, node := range v.nodeAddresses {
-		v.workChannel <- WorkItem{
+	for _, node := range s.nodeAddresses {
+		s.workChannel <- WorkItem{
 			ip:                node,
 			paths:             nodeLogsToCheck,
 			backoffMultiplier: 1,
 		}
 	}
-	for _, worker := range v.workers {
+	for _, worker := range s.workers {
 		go worker.Run()
 	}
-	<-v.stopChannel
-	v.wg.Wait()
+	<-s.stopChannel
+	s.wg.Wait()
 }
 
+// Run starts log size gathering.
 func (g *LogSizeGatherer) Run() {
 	for g.Work() {
 	}
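The v -> s receiver rename in this hunk addresses golint's receiver-name consistency check: other methods on *LogsSizeVerifier (such as GetSummary in the hunk header above) already use s, and golint flags a type whose methods mix receiver names. A tiny illustration with hypothetical names; the quoted message is approximately what golint reports:

type counter struct{ n int }

// Inconsistent receivers draw a warning like:
// "receiver name c should be consistent with previous receiver name cnt for counter"
// func (cnt *counter) add()     { cnt.n++ }
// func (c *counter) get() int   { return c.n }

// Consistent receiver names keep golint quiet.
func (c *counter) add()     { c.n++ }
func (c *counter) get() int { return c.n }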
@@ -270,7 +278,7 @@ func (g *LogSizeGatherer) Work() bool {
 			Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
 			continue
 		}
-		g.data.AddNewData(workItem.ip, path, now, size)
+		g.data.addNewData(workItem.ip, path, now, size)
 	}
 	go g.pushWorkItem(workItem)
 	return true