Mirror of https://github.com/k3s-io/kubernetes.git
Allow uncertain mount tests to run in parallel
parent b2860a3604
commit 6f9a3374b1
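The change has three parts, visible in the hunks below: each subtest now calls t.Parallel(), the loop variables are copied into per-iteration locals before the closure captures them, and each subtest writes to its own md5-suffixed pods directory instead of the shared kubeletPodsDir. The copy-before-capture step matters because t.Parallel() returns immediately and the subtest body runs after the loop has moved on; under pre-Go 1.22 range semantics, closures capturing the range variables directly would all observe the final values of mode and tc. A minimal sketch of the pattern (illustrative names, not code from this commit):

package demo

import (
	"fmt"
	"testing"
)

// Each iteration copies the range value into a fresh local before t.Run,
// so the closure released by t.Parallel() owns its own copy, mirroring
// the mode/tc copies in the diff below.
func TestParallelCapture(t *testing.T) {
	cases := []string{"Block", "Filesystem"}
	for i := range cases {
		tc := cases[i] // copy before capture
		t.Run(fmt.Sprintf("case [%s]", tc), func(t *testing.T) {
			t.Parallel() // signals the parent to continue; the body runs later
			if tc == "" {
				t.Fatal("range variable was not captured per iteration")
			}
		})
	}
}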
@@ -17,6 +17,7 @@ limitations under the License.
 package reconciler
 
 import (
+	"crypto/md5"
 	"fmt"
 	"testing"
 	"time"
@@ -1222,11 +1223,17 @@ func Test_UncertainDeviceGlobalMounts(t *testing.T) {
 		},
 	}
+	modes := []v1.PersistentVolumeMode{v1.PersistentVolumeBlock, v1.PersistentVolumeFilesystem}
 
-	for _, mode := range []v1.PersistentVolumeMode{v1.PersistentVolumeBlock, v1.PersistentVolumeFilesystem} {
-		for _, tc := range tests {
-			testName := fmt.Sprintf("%s [%s]", tc.name, mode)
-			t.Run(testName+"[", func(t *testing.T) {
+	for modeIndex := range modes {
+		for tcIndex := range tests {
+			mode := modes[modeIndex]
+			tc := tests[tcIndex]
+			testName := fmt.Sprintf("%s [%s]", tc.name, mode)
+			uniqueTestString := fmt.Sprintf("global-mount-%s", testName)
+			uniquePodDir := fmt.Sprintf("%s-%x", kubeletPodsDir, md5.Sum([]byte(uniqueTestString)))
+			t.Run(testName+"[", func(t *testing.T) {
+				t.Parallel()
 				pv := &v1.PersistentVolume{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: tc.volumeName,
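Worth noting how the unique directory is derived: md5.Sum returns a [16]byte array, and the %x verb renders it as 32 hex characters, so every mode/test-case pair gets a deterministic directory that no concurrently running subtest shares. A standalone sketch of the same expression (the kubeletPodsDir value and test name here are illustrative):

package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	kubeletPodsDir := "/var/lib/kubelet/pods" // illustrative; the test uses its own constant
	uniqueTestString := "global-mount-volume mount fails [Block]"
	// md5.Sum returns a [16]byte; %x formats it as 32 hex characters,
	// giving each parallel subtest its own deterministic pods directory.
	uniquePodDir := fmt.Sprintf("%s-%x", kubeletPodsDir, md5.Sum([]byte(uniqueTestString)))
	fmt.Println(uniquePodDir) // e.g. /var/lib/kubelet/pods-<32 hex digits>
}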
@@ -1297,7 +1304,7 @@ func Test_UncertainDeviceGlobalMounts(t *testing.T) {
 				&mount.FakeMounter{},
 				hostutil.NewFakeHostUtil(nil),
 				volumePluginMgr,
-				kubeletPodsDir)
+				uniquePodDir)
 			volumeSpec := &volume.Spec{PersistentVolume: pv}
 			podName := util.GetUniquePodName(pod)
 			volumeName, err := dsw.AddPodToVolume(
@@ -1402,11 +1409,17 @@ func Test_UncertainVolumeMountState(t *testing.T) {
 			supportRemount: true,
 		},
 	}
+	modes := []v1.PersistentVolumeMode{v1.PersistentVolumeBlock, v1.PersistentVolumeFilesystem}
 
-	for _, mode := range []v1.PersistentVolumeMode{v1.PersistentVolumeBlock, v1.PersistentVolumeFilesystem} {
-		for _, tc := range tests {
+	for modeIndex := range modes {
+		for tcIndex := range tests {
+			mode := modes[modeIndex]
+			tc := tests[tcIndex]
 			testName := fmt.Sprintf("%s [%s]", tc.name, mode)
+			uniqueTestString := fmt.Sprintf("local-mount-%s", testName)
+			uniquePodDir := fmt.Sprintf("%s-%x", kubeletPodsDir, md5.Sum([]byte(uniqueTestString)))
 			t.Run(testName, func(t *testing.T) {
+				t.Parallel()
 				pv := &v1.PersistentVolume{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: tc.volumeName,
@@ -1476,7 +1489,7 @@ func Test_UncertainVolumeMountState(t *testing.T) {
 				&mount.FakeMounter{},
 				hostutil.NewFakeHostUtil(nil),
 				volumePluginMgr,
-				kubeletPodsDir)
+				uniquePodDir)
 			volumeSpec := &volume.Spec{PersistentVolume: pv}
 			podName := util.GetUniquePodName(pod)
 			volumeName, err := dsw.AddPodToVolume(
@@ -1494,6 +1507,9 @@ func Test_UncertainVolumeMountState(t *testing.T) {
 					close(stoppedChan)
 				}()
 				waitForVolumeToExistInASW(t, volumeName, asw)
+				// all of these tests rely on device to be globally mounted and hence waiting for global
+				// mount ensures that unmountDevice is called as expected.
+				waitForGlobalMount(t, volumeName, asw)
 				if tc.volumeName == volumetesting.TimeoutAndFailOnSetupVolumeName {
 					// Wait upto 10s for reconciler to catchup
 					time.Sleep(reconcilerSyncWaitDuration)
@@ -1570,6 +1586,26 @@ func waitForUncertainGlobalMount(t *testing.T, volumeName v1.UniqueVolumeName, a
 	}
 }
 
+func waitForGlobalMount(t *testing.T, volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) {
+	// check if volume is globally mounted
+	err := retryWithExponentialBackOff(
+		testOperationBackOffDuration,
+		func() (bool, error) {
+			mountedVolumes := asw.GetGloballyMountedVolumes()
+			for _, v := range mountedVolumes {
+				if v.VolumeName == volumeName {
+					return true, nil
+				}
+			}
+			return false, nil
+		},
+	)
+
+	if err != nil {
+		t.Fatalf("expected volume devices %s to be mounted globally", volumeName)
+	}
+}
+
 func waitForUncertainPodMount(t *testing.T, volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) {
 	// check if volume is locally pod mounted in uncertain state
 	err := retryWithExponentialBackOff(
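retryWithExponentialBackOff and testOperationBackOffDuration are pre-existing helpers in reconciler_test.go that this diff does not show. Judging from the func() (bool, error) condition passed to it, it presumably wraps wait.ExponentialBackoff from k8s.io/apimachinery; a plausible sketch, with assumed backoff parameters:

package reconciler

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// Assumed shape of the existing helper: retry fn with exponential backoff
// starting at initialDuration. Factor and Steps are guesses, not taken
// from the diff.
func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
	backoff := wait.Backoff{
		Duration: initialDuration,
		Factor:   3,
		Jitter:   0,
		Steps:    6,
	}
	return wait.ExponentialBackoff(backoff, fn)
}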