Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-03 09:22:44 +00:00)
Merge pull request #46423 from gmarek/fix_perf
Automatic merge from submit-queue (batch tested with PRs 45949, 46009, 46320, 46423, 46437)

Fix performance test issues

Fix #46198
This commit is contained in: commit b8dc4915f7
@@ -39,7 +39,6 @@ import (
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/pkg/metrics"
 
-	"github.com/golang/glog"
 	"github.com/prometheus/common/model"
 )
 
@@ -367,10 +366,8 @@ func getOneTimeResourceUsageOnNode(
 	}
 
 	f := func(name string, newStats *stats.ContainerStats) *ContainerResourceUsage {
-		// TODO(gmarek): remove when #46198 is debugged.
-		if newStats == nil || newStats.CPU == nil {
-			glog.Warning("NewStats is %#v for container %v", newStats, name)
-			return &ContainerResourceUsage{}
+		if newStats == nil || newStats.CPU == nil || newStats.Memory == nil {
+			return nil
 		}
 		return &ContainerResourceUsage{
 			Name: name,
@@ -399,7 +396,9 @@ func getOneTimeResourceUsageOnNode(
 			if !isInteresting {
 				continue
 			}
-			usageMap[pod.PodRef.Name+"/"+container.Name] = f(pod.PodRef.Name+"/"+container.Name, &container)
+			if usage := f(pod.PodRef.Name+"/"+container.Name, &container); usage != nil {
+				usageMap[pod.PodRef.Name+"/"+container.Name] = usage
+			}
 		}
 	}
 	return usageMap, nil
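The two hunks above change the e2e framework's per-container stats conversion: instead of logging and returning an empty ContainerResourceUsage when the kubelet has not reported CPU or memory yet, the closure now returns nil and the caller skips the entry. Below is a minimal, self-contained sketch of that pattern; containerStats, resourceUsage, and toUsage are hypothetical stand-ins for the framework and kubelet stats types, not the upstream code.

package main

import "fmt"

// containerStats and resourceUsage are hypothetical stand-ins for the
// kubelet stats and framework types used in the hunks above.
type containerStats struct {
	CPU    *uint64 // nil when CPU has not been sampled yet
	Memory *uint64 // nil when memory has not been sampled yet
}

type resourceUsage struct {
	Name        string
	CPUNanos    uint64
	MemoryBytes uint64
}

// toUsage mirrors the fixed closure: incomplete stats yield nil instead of a
// zero-value struct, so callers can tell "no data" apart from "zero usage".
func toUsage(name string, s *containerStats) *resourceUsage {
	if s == nil || s.CPU == nil || s.Memory == nil {
		return nil
	}
	return &resourceUsage{Name: name, CPUNanos: *s.CPU, MemoryBytes: *s.Memory}
}

func main() {
	cpu, mem := uint64(1000000), uint64(64<<20)
	samples := map[string]*containerStats{
		"kube-system/fluentd": {CPU: &cpu, Memory: &mem},
		"kube-system/kubelet": {CPU: &cpu}, // memory not sampled yet
	}

	usageMap := map[string]*resourceUsage{}
	for name, s := range samples {
		// Mirror of the second hunk: only store complete samples, so later
		// summarization never dereferences a nil CPU/Memory field.
		if usage := toUsage(name, s); usage != nil {
			usageMap[name] = usage
		}
	}
	fmt.Println(len(usageMap), "complete samples") // prints: 1 complete samples
}

Skipping incomplete samples keeps the usage map free of zero-value placeholders, so downstream summarization cannot mistake "not sampled yet" for "zero usage", and the glog.Warning call (and its import) are no longer needed.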
@@ -59,6 +59,8 @@ const (
 	// nodeCountPerNamespace determines how many namespaces we will be using
 	// depending on the number of nodes in the underlying cluster.
 	nodeCountPerNamespace = 100
+	// How many threads will be used to create/delete services during this test.
+	serviceOperationsParallelism = 5
 )
 
 var randomKind = schema.GroupKind{Kind: "Random"}
@@ -190,7 +192,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 				_, err := clientset.Core().Services(services[i].Namespace).Create(services[i])
 				framework.ExpectNoError(err)
 			}
-			workqueue.Parallelize(25, len(services), createService)
+			workqueue.Parallelize(serviceOperationsParallelism, len(services), createService)
 			framework.Logf("%v Services created.", len(services))
 			defer func(services []*v1.Service) {
 				framework.Logf("Starting to delete services...")
@@ -198,7 +200,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
 					err := clientset.Core().Services(services[i].Namespace).Delete(services[i].Name, nil)
 					framework.ExpectNoError(err)
 				}
-				workqueue.Parallelize(25, len(services), deleteService)
+				workqueue.Parallelize(serviceOperationsParallelism, len(services), deleteService)
 				framework.Logf("Services deleted")
 			}(services)
 		} else {
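These two hunks replace the hard-coded worker count of 25 with the new serviceOperationsParallelism = 5 constant, so service creation and deletion in the load test are throttled consistently. Below is a minimal, self-contained sketch of what a Parallelize-style helper does; it illustrates the calling pattern and is not the upstream pkg/util/workqueue implementation, and createService here is a hypothetical stand-in for the test's closure.

package main

import (
	"fmt"
	"sync"
)

// parallelize runs doWorkPiece for every index in [0, pieces) using at most
// `workers` goroutines, in the spirit of the workqueue.Parallelize calls in
// the diff above. Illustrative sketch only.
func parallelize(workers, pieces int, doWorkPiece func(piece int)) {
	toProcess := make(chan int, pieces)
	for i := 0; i < pieces; i++ {
		toProcess <- i
	}
	close(toProcess)

	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for piece := range toProcess {
				doWorkPiece(piece)
			}
		}()
	}
	wg.Wait()
}

func main() {
	const serviceOperationsParallelism = 5 // mirrors the constant added in the diff
	services := make([]string, 20)         // hypothetical stand-in for []*v1.Service
	for i := range services {
		services[i] = fmt.Sprintf("svc-%d", i)
	}

	var mu sync.Mutex
	created := 0
	createService := func(i int) { // stand-in for the test's create closure
		mu.Lock()
		created++ // a real closure would call clientset.Core().Services(...).Create
		mu.Unlock()
	}

	parallelize(serviceOperationsParallelism, len(services), createService)
	fmt.Printf("created %d services with %d workers\n", created, serviceOperationsParallelism)
}

The effect of the change is simply to lower the concurrency of service creates/deletes from 25 in-flight operations to 5, reducing pressure on the apiserver during the load test.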
@@ -303,8 +303,8 @@ current-context: kubemark-context")
   metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES:-10}))
   sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
   metrics_cpu_per_node_numerator=${NUM_NODES:-10}
-  metrics_cpu_per_node_denumerator=2
-  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denumerator))
+  metrics_cpu_per_node_denominator=2
+  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
   sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
   eventer_mem_per_node=500
   eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES:-10}))
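This last hunk only corrects the spelling of the denominator variable in the kubemark heapster sizing; the arithmetic is unchanged. As a worked illustration (assuming NUM_NODES=1000), bash integer arithmetic gives metrics_cpu = 80 + 1000 / 2 = 580 and eventer_mem = 200 * 1024 + 500 * 1000 = 704800, and those values are substituted for {{METRICS_CPU}} and the eventer memory placeholder in heapster.json.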