Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)
changed density test to use volumes, and include deletion before logging
This commit is contained in:
parent 86d561424d
commit ad73b325f3
@@ -324,7 +324,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	)
 
 	// create test pod data structure
-	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)
+	pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
 
 	// the controller watches the change of pod status
 	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
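The call now passes true as a new second argument; the matching signature change, which adds a volume bool parameter to newTestPods, appears in a later hunk of this diff.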
@@ -338,9 +338,6 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
-	// Explicitly delete pods to prevent namespace controller cleanning up timeout
-	defer deletePodsSync(f, append(pods, getCadvisorPod()))
-	defer rc.Stop()
 
 	By("Creating a batch of pods")
 	// It returns a map['pod name']'creation time' containing the creation timestamps
@@ -387,6 +384,9 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	sort.Sort(framework.LatencySlice(e2eLags))
 	batchLag := lastRunning.Time.Sub(firstCreate.Time)
 
+	rc.Stop()
+	deletePodsSync(f, append(pods, getCadvisorPod()))
+
 	// Log time series data.
 	if isLogTimeSeries {
 		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
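Taken together, the two hunks above implement the "deletion before logging" half of the commit message: the deferred deletePodsSync/rc.Stop calls are removed, and the same calls are re-issued explicitly once the latency data has been gathered, so the pods are gone and the collector is stopped before any results are logged. A minimal, self-contained sketch of that ordering change, using stand-in types rather than the test's real ResourceCollector and framework helpers:

package main

import "fmt"

// Stand-ins (hypothetical) for the test's resource collector and pod cleanup,
// used only to illustrate the ordering change in this commit.
type resourceCollector struct{}

func (rc *resourceCollector) Start() { fmt.Println("collector started") }
func (rc *resourceCollector) Stop()  { fmt.Println("collector stopped") }

func deletePods() { fmt.Println("pods deleted synchronously") }
func logResults() { fmt.Println("results logged") }

// Old shape: cleanup is deferred, so it runs only after logging has finished
// (defers run LIFO on return: rc.Stop first, then deletePods).
func runWithDeferredCleanup(rc *resourceCollector) {
	rc.Start()
	defer deletePods()
	defer rc.Stop()
	logResults()
}

// New shape: the collector is stopped and the pods are deleted explicitly
// before anything is logged, matching "include deletion before logging".
func runWithExplicitCleanup(rc *resourceCollector) {
	rc.Start()
	rc.Stop()
	deletePods()
	logResults()
}

func main() {
	runWithDeferredCleanup(&resourceCollector{})
	runWithExplicitCleanup(&resourceCollector{})
}

One trade-off worth noting: the deferred calls also ran when the test failed partway through, whereas the explicit calls only run on the path that reaches the logging code.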
@@ -403,8 +403,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 		podType = "density_test_pod"
 		sleepBeforeCreatePods = 30 * time.Second
 	)
-	bgPods := newTestPods(testArg.bgPodsNr, framework.GetPauseImageNameForHostArch(), "background_pod")
-	testPods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)
+	bgPods := newTestPods(testArg.bgPodsNr, true, framework.GetPauseImageNameForHostArch(), "background_pod")
+	testPods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
 
 	By("Creating a batch of background pods")
 
@@ -414,13 +414,13 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
-	// Explicitly delete pods to prevent namespace controller cleanning up timeout
-	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
-	defer rc.Stop()
 
 	// Create pods sequentially (back-to-back). e2eLags have been sorted.
 	batchlag, e2eLags := createBatchPodSequential(f, testPods)
 
+	rc.Stop()
+	deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
+
 	// Log throughput data.
 	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)
 
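runDensitySeqTest gets the same treatment as the batch test above: the deferred cleanup pair is removed, and explicit rc.Stop() and deletePodsSync(...) calls run between createBatchPodSequential and the throughput logging.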
@@ -386,7 +386,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 }
 
 // newTestPods creates a list of pods (specification) for test.
-func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
+func newTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod {
 	var pods []*v1.Pod
 	for i := 0; i < numPods; i++ {
 		podName := "test-" + string(uuid.NewUUID())
@@ -394,22 +394,48 @@ func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
 			"type": podType,
 			"name": podName,
 		}
-		pods = append(pods,
-			&v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   podName,
-					Labels: labels,
-				},
-				Spec: v1.PodSpec{
-					// Restart policy is always (default).
-					Containers: []v1.Container{
-						{
-							Image: imageName,
-							Name:  podName,
-						},
-					},
-				},
-			})
+		if volume {
+			pods = append(pods,
+				&v1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   podName,
+						Labels: labels,
+					},
+					Spec: v1.PodSpec{
+						// Restart policy is always (default).
+						Containers: []v1.Container{
+							{
+								Image: imageName,
+								Name:  podName,
+								VolumeMounts: []v1.VolumeMount{
+									{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
+								},
+							},
+						},
+						Volumes: []v1.Volume{
+							{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
+						},
+					},
+				})
+		} else {
+			pods = append(pods,
+				&v1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   podName,
+						Labels: labels,
+					},
+					Spec: v1.PodSpec{
+						// Restart policy is always (default).
+						Containers: []v1.Container{
+							{
+								Image: imageName,
+								Name:  podName,
+							},
+						},
+					},
+				})
+		}
+
 	}
 	return pods
 }
@@ -142,7 +142,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
 		// sleep for an interval here to measure steady data
 		sleepAfterCreatePods = 10 * time.Second
 	)
-	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), "test_pod")
+	pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), "test_pod")
 
 	rc.Start()
 	// Explicitly delete pods to prevent namespace controller cleanning up timeout
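Only the newTestPods call changes in runResourceUsageTest: it now opts into volume-backed pods, while the surrounding rc.Start() and cleanup comment remain unchanged context.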
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	Context("Network", func() {
 		It("should recover from ip leak", func() {
 
-			pods := newTestPods(podCount, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
+			pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
 			By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
 			createBatchPodWithRateControl(f, pods, podCreationInterval)
 			defer deletePodsSync(f, pods)
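The restart test is the one call site that passes false: it exercises pod creation and IP-leak recovery, where plain pause pods are presumably sufficient, so it skips the volume setup the density and resource-usage tests now use.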