Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #48695 from gmarek/sig-owner
Automatic merge from submit-queue

Split out max_density test and add owners to other perf tests

@rrati - IIRC you're the one who added the max-density test and promised to look after it. I split it out of the main density.go, so it'd be clear that this one is not owned by us.

cc @kubernetes/sig-scalability-misc
Commit b4620c3eaa
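For context before the diff: the removed [Feature:ManualPerformance] test sizes itself from the cluster it runs against. It sums each node's pod capacity (.status.capacity.pods, i.e. the kubelet's max-pods value), subtracts the count returned by framework.WaitForStableCluster (pods already scheduled on the cluster), and fills the remaining headroom with pause pods driven by replication controllers. Below is a minimal standalone sketch of just the capacity sum, assuming a recent client-go (v0.18+) and a kubeconfig at the default path; totalPodCapacity is our name for the helper, not something from the e2e framework.

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

// totalPodCapacity sums .status.capacity.pods over all nodes — the same
// figure the removed test starts from before subtracting already-running pods.
func totalPodCapacity(c kubernetes.Interface) (int, error) {
    nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
    if err != nil {
        return 0, err
    }
    total := 0
    for _, n := range nodes.Items {
        // Capacity.Pods() reflects the kubelet's max-pods setting for the node.
        total += int(n.Status.Capacity.Pods().Value())
    }
    return total, nil
}

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    total, err := totalPodCapacity(kubernetes.NewForConfigOrDie(cfg))
    if err != nil {
        panic(err)
    }
    fmt.Printf("cluster-wide pod capacity: %d pods\n", total)
}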
density.go:

@@ -299,7 +299,6 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 var _ = framework.KubeDescribe("Density", func() {
     var c clientset.Interface
     var nodeCount int
-    var name string
     var additionalPodsPrefix string
     var ns string
     var uuid string
@@ -774,48 +773,6 @@ var _ = framework.KubeDescribe("Density", func() {
             cleanupDensityTest(dConfig)
         })
     }
-
-    // Calculate total number of pods from each node's max-pod
-    It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
-        totalPods = 0
-        for _, n := range nodes.Items {
-            totalPods += int(n.Status.Capacity.Pods().Value())
-        }
-        totalPods -= framework.WaitForStableCluster(c, masters)
-
-        fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
-        framework.ExpectNoError(err)
-        defer fileHndl.Close()
-        collectionCount := 1
-        configs := make([]testutils.RunObjectConfig, collectionCount)
-        podsPerCollection := int(totalPods / collectionCount)
-        for i := 0; i < collectionCount; i++ {
-            if i == collectionCount-1 {
-                podsPerCollection += int(math.Mod(float64(totalPods), float64(collectionCount)))
-            }
-            name = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
-            configs[i] = &testutils.RCConfig{Client: c,
-                Image: framework.GetPauseImageName(f.ClientSet),
-                Name: name,
-                Namespace: ns,
-                Labels: map[string]string{"type": "densityPod"},
-                PollInterval: DensityPollInterval,
-                PodStatusFile: fileHndl,
-                Replicas: podsPerCollection,
-                MaxContainerFailures: &MaxContainerFailures,
-                Silent: true,
-                LogFunc: framework.Logf,
-            }
-        }
-        dConfig := DensityTestConfig{
-            ClientSets: []clientset.Interface{f.ClientSet},
-            Configs: configs,
-            PodCount: totalPods,
-            PollInterval: DensityPollInterval,
-        }
-        e2eStartupTime = runDensityTest(dConfig)
-        cleanupDensityTest(dConfig)
-    })
 })

 func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
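One small detail in the removed sizing logic: the pods are spread over collectionCount replication controllers, with the division remainder folded into the last one via math.Mod on float64 values. Integer arithmetic expresses the same split more directly; here is a hypothetical standalone equivalent (splitPods is our name, not the test's).

package main

import "fmt"

// splitPods divides total pods across count collections, folding the
// remainder into the last collection so every pod is accounted for —
// the integer equivalent of the removed math.Mod-based sizing.
func splitPods(total, count int) []int {
    sizes := make([]int, count)
    for i := range sizes {
        sizes[i] = total / count
    }
    sizes[count-1] += total % count
    return sizes
}

func main() {
    fmt.Println(splitPods(110, 4)) // [27 27 27 29]
}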