Mirror of https://github.com/k3s-io/kubernetes.git
Add unit test for verifying if processReconstructedVolumes works as expected
commit b455270f6e (parent b8257e8c01)
@@ -290,7 +290,7 @@ func (rc *reconciler) processReconstructedVolumes() {
         }

         if uncertainVolumeCount > 0 {
-            // If the volume has device to mount, we mark its device as mounted.
+            // If the volume has device to mount, we mark its device as uncertain
             if glblVolumeInfo.deviceMounter != nil || glblVolumeInfo.blockVolumeMapper != nil {
                 deviceMountPath, err := getDeviceMountPath(glblVolumeInfo)
                 if err != nil {
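
This one-word comment fix tracks what the code actually does: for a volume reconstructed from disk, the reconciler cannot prove that the device mount ever completed, so the device is recorded as uncertain rather than mounted, and a later mount operation re-runs and verifies instead of being skipped. A minimal, self-contained sketch of that distinction (the type and constant names below are illustrative only, not the reconciler's real API):

// Toy model of the mounted-vs-uncertain device state distinction.
package main

import "fmt"

type deviceMountState string

const (
    deviceNotMounted      deviceMountState = "NotMounted"
    deviceMountUncertain  deviceMountState = "Uncertain"       // reconstruction saw a mount it could not verify
    deviceGloballyMounted deviceMountState = "GloballyMounted" // mount is known to have succeeded
)

// nextAction shows why "uncertain" is the safe default after reconstruction:
// the device mount is retried (idempotently) rather than assumed done.
func nextAction(s deviceMountState) string {
    switch s {
    case deviceGloballyMounted:
        return "skip device mount"
    case deviceMountUncertain:
        return "re-run device mount and verify"
    default:
        return "run device mount"
    }
}

func main() {
    fmt.Println(nextAction(deviceMountUncertain)) // re-run device mount and verify
}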
@@ -2239,10 +2239,12 @@ func getReconciler(kubeletDir string, t *testing.T, volumePaths []string) (Recon

 func TestSyncStates(t *testing.T) {
     tests := []struct {
         name                 string
         volumePaths          []string
         createMountPoint     bool
-        verifyFunc           func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
+        addToDSOW            bool
+        postSyncStatCallback func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
+        verifyFunc           func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
     }{
         {
             name: "when two pods are using same volume and both are deleted",
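
The two added fields extend the table-driven test with an optional mid-test hook: postSyncStatCallback, when non-nil, runs between syncStates and the final verifyFunc, letting a case inspect intermediate reconciler state. A stripped-down sketch of this optional-hook pattern, with a made-up function under test (double) standing in for the reconciler:

// Sketch of a table-driven test with an optional mid-test hook,
// mirroring the postSyncStatCallback/verifyFunc split above.
package main

import (
    "fmt"
    "testing"
)

// double is a stand-in for the code under test.
func double(x int) int { return x * 2 }

func TestDouble(t *testing.T) {
    tests := []struct {
        name     string
        input    int
        postHook func(got int) error // optional, like postSyncStatCallback; nil means no extra step
        verify   func(got int) error
    }{
        {
            name:  "doubling two",
            input: 2,
            postHook: func(got int) error {
                if got%2 != 0 {
                    return fmt.Errorf("expected an even intermediate value, got %d", got)
                }
                return nil
            },
            verify: func(got int) error {
                if got != 4 {
                    return fmt.Errorf("expected 4, got %d", got)
                }
                return nil
            },
        },
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            got := double(tc.input)
            if tc.postHook != nil { // only some cases supply a hook
                if err := tc.postHook(got); err != nil {
                    t.Errorf("post hook failed: %v", err)
                }
            }
            if err := tc.verify(got); err != nil {
                t.Errorf("verification failed: %v", err)
            }
        })
    }
}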
@@ -2275,6 +2277,29 @@ func TestSyncStates(t *testing.T) {
                 })
             },
         },
+        {
+            name: "when volume exists in dsow, volume should be recorded in skipped during reconstruction",
+            volumePaths: []string{
+                path.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
+            },
+            createMountPoint: true,
+            addToDSOW:        true,
+            postSyncStatCallback: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
+                skippedVolumes := rcInstance.skippedDuringReconstruction
+                if len(skippedVolumes) != 1 {
+                    return fmt.Errorf("expected 1 volume in skippedDuringReconstruction, got %d", len(skippedVolumes))
+                }
+                rcInstance.processReconstructedVolumes()
+                return nil
+            },
+            verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
+                mountedPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
+                if len(mountedPods) != 1 {
+                    return fmt.Errorf("expected 1 volume in the list of mounted volumes, got %d", len(mountedPods))
+                }
+                return nil
+            },
+        },
     }
     for _, tc := range tests {
         t.Run(tc.name, func(t *testing.T) {
@@ -2284,6 +2309,28 @@ func TestSyncStates(t *testing.T) {
             }
             defer os.RemoveAll(tmpKubeletDir)

+            pod := &v1.Pod{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "pod1",
+                    UID:  "pod1uid",
+                },
+                Spec: v1.PodSpec{
+                    Volumes: []v1.Volume{
+                        {
+                            Name: "volume-name",
+                            VolumeSource: v1.VolumeSource{
+                                GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+                                    PDName: "volume-name",
+                                },
+                            },
+                        },
+                    },
+                },
+            }
+
+            volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+            podName := util.GetUniquePodName(pod)
+
             // create kubelet pod directory
             tmpKubeletPodDir := filepath.Join(tmpKubeletDir, "pods")
             os.MkdirAll(tmpKubeletPodDir, 0755)
||||||
@ -2301,11 +2348,27 @@ func TestSyncStates(t *testing.T) {
|
|||||||
|
|
||||||
rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths)
|
rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths)
|
||||||
rcInstance, _ := rc.(*reconciler)
|
rcInstance, _ := rc.(*reconciler)
|
||||||
|
|
||||||
|
if tc.addToDSOW {
|
||||||
|
volumeName, err := rcInstance.desiredStateOfWorld.AddPodToVolume(
|
||||||
|
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("error adding volume %s to dsow: %v", volumeSpec.Name(), err)
|
||||||
|
}
|
||||||
|
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(volumeName, volumeSpec, nodeName, "")
|
||||||
|
}
|
||||||
|
|
||||||
rcInstance.syncStates(tmpKubeletPodDir)
|
rcInstance.syncStates(tmpKubeletPodDir)
|
||||||
|
if tc.postSyncStatCallback != nil {
|
||||||
|
err := tc.postSyncStatCallback(rcInstance, fakePlugin)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("test %s, postSyncStatCallback failed: %v", tc.name, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if err := tc.verifyFunc(rcInstance, fakePlugin); err != nil {
|
if err := tc.verifyFunc(rcInstance, fakePlugin); err != nil {
|
||||||
t.Errorf("test %s failed: %v", tc.name, err)
|
t.Errorf("test %s failed: %v", tc.name, err)
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
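
To run just this test, assuming the upstream tree layout in which the reconciler package lives under pkg/kubelet/volumemanager/reconciler, the standard Go test filter works:

    go test ./pkg/kubelet/volumemanager/reconciler/... -run TestSyncStates -v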