Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)
Merge pull request #40874 from dashpole/density_test_volumes
Automatic merge from submit-queue (batch tested with PRs 40864, 40666, 38382, 40874).

Density test includes deletion and volumes

Moved the calls to deletePodsSync to BEFORE logDensityTimeSeries, because the parser treats a line printed by logDensityTimeSeries as the "end" of the test. This change includes pod deletion in the "test window" but makes no other changes. Volumes were also added to the test pods, so that mounting and unmounting volumes are taken into account for performance profiling.
This commit is contained in commit 12a80380bc.
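In outline, the batch density test now measures the following window (a simplified sketch of the ordering, not the verbatim test code; the helper names are taken from the diff below, while `interval` and the wait step stand in for details elided here):

	rc.Start()                                        // begin resource collection
	createBatchPodWithRateControl(f, pods, interval)  // create pods, now with emptyDir volumes
	// ... wait until every pod has been observed Running ...
	rc.Stop()
	deletePodsSync(f, append(pods, getCadvisorPod())) // deletion now happens inside the test window
	if isLogTimeSeries {
		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo) // parser treats this line as the test's end
	}

The deletes were previously deferred (so they ran after logDensityTimeSeries); calling them explicitly before the log line pulls deletion, and volume unmounting, into the profiled window.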
@@ -324,7 +324,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	)
 
 	// create test pod data structure
-	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)
+	pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
 
 	// the controller watches the change of pod status
 	controller := newInformerWatchPod(f, mutex, watchTimes, podType)
@@ -338,9 +338,6 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
-	// Explicitly delete pods to prevent namespace controller cleanning up timeout
-	defer deletePodsSync(f, append(pods, getCadvisorPod()))
-	defer rc.Stop()
 
 	By("Creating a batch of pods")
 	// It returns a map['pod name']'creation time' containing the creation timestamps
@@ -387,6 +384,9 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
 	sort.Sort(framework.LatencySlice(e2eLags))
 	batchLag := lastRunning.Time.Sub(firstCreate.Time)
 
+	rc.Stop()
+	deletePodsSync(f, append(pods, getCadvisorPod()))
+
 	// Log time series data.
 	if isLogTimeSeries {
 		logDensityTimeSeries(rc, createTimes, watchTimes, testInfo)
@@ -403,8 +403,8 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 		podType = "density_test_pod"
 		sleepBeforeCreatePods = 30 * time.Second
 	)
-	bgPods := newTestPods(testArg.bgPodsNr, framework.GetPauseImageNameForHostArch(), "background_pod")
-	testPods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), podType)
+	bgPods := newTestPods(testArg.bgPodsNr, true, framework.GetPauseImageNameForHostArch(), "background_pod")
+	testPods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)
 
 	By("Creating a batch of background pods")
 
@@ -414,13 +414,13 @@ func runDensitySeqTest(f *framework.Framework, rc *ResourceCollector, testArg de
 	time.Sleep(sleepBeforeCreatePods)
 
 	rc.Start()
-	// Explicitly delete pods to prevent namespace controller cleanning up timeout
-	defer deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
-	defer rc.Stop()
 
 	// Create pods sequentially (back-to-back). e2eLags have been sorted.
 	batchlag, e2eLags := createBatchPodSequential(f, testPods)
 
+	rc.Stop()
+	deletePodsSync(f, append(bgPods, append(testPods, getCadvisorPod())...))
+
 	// Log throughput data.
 	logPodCreateThroughput(batchlag, e2eLags, testArg.podsNr, testInfo)
 
@@ -386,7 +386,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
 }
 
 // newTestPods creates a list of pods (specification) for test.
-func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
+func newTestPods(numPods int, volume bool, imageName, podType string) []*v1.Pod {
 	var pods []*v1.Pod
 	for i := 0; i < numPods; i++ {
 		podName := "test-" + string(uuid.NewUUID())
@@ -394,22 +394,48 @@ func newTestPods(numPods int, imageName, podType string) []*v1.Pod {
 			"type": podType,
 			"name": podName,
 		}
-		pods = append(pods,
-			&v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:   podName,
-					Labels: labels,
-				},
-				Spec: v1.PodSpec{
-					// Restart policy is always (default).
-					Containers: []v1.Container{
-						{
-							Image: imageName,
-							Name:  podName,
-						},
-					},
-				},
-			})
+		if volume {
+			pods = append(pods,
+				&v1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   podName,
+						Labels: labels,
+					},
+					Spec: v1.PodSpec{
+						// Restart policy is always (default).
+						Containers: []v1.Container{
+							{
+								Image: imageName,
+								Name:  podName,
+								VolumeMounts: []v1.VolumeMount{
+									{MountPath: "/test-volume-mnt", Name: podName + "-volume"},
+								},
+							},
+						},
+						Volumes: []v1.Volume{
+							{Name: podName + "-volume", VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}}},
+						},
+					},
+				})
+		} else {
+			pods = append(pods,
+				&v1.Pod{
+					ObjectMeta: metav1.ObjectMeta{
+						Name:   podName,
+						Labels: labels,
+					},
+					Spec: v1.PodSpec{
+						// Restart policy is always (default).
+						Containers: []v1.Container{
+							{
+								Image: imageName,
+								Name:  podName,
+							},
+						},
+					},
+				})
+		}
+
 	}
 	return pods
 }
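With the new volume parameter, each call site now opts in or out of the per-pod emptyDir volume. For example (call sites as they appear in this commit):

	// Density and resource-usage tests mount an emptyDir volume in every pod:
	pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), podType)

	// The restart test keeps plain pods without volumes:
	pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test")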
@@ -142,7 +142,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
 		// sleep for an interval here to measure steady data
 		sleepAfterCreatePods = 10 * time.Second
 	)
-	pods := newTestPods(testArg.podsNr, framework.GetPauseImageNameForHostArch(), "test_pod")
+	pods := newTestPods(testArg.podsNr, true, framework.GetPauseImageNameForHostArch(), "test_pod")
 
 	rc.Start()
 	// Explicitly delete pods to prevent namespace controller cleanning up timeout
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	Context("Network", func() {
 		It("should recover from ip leak", func() {
 
-			pods := newTestPods(podCount, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
+			pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
 			By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
 			createBatchPodWithRateControl(f, pods, podCreationInterval)
 			defer deletePodsSync(f, pods)