Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-25 04:11:46 +00:00)
Use CheckAndMarkVolumeAsUncertainViaReconstruction for uncertain volumes
Also, only remove volumes from skippedDuringReconstruction if the volume was marked as attached.
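At a high level, the change swaps an unconditional "add this reconstructed volume to the actual state of world" call for a check-and-mark call that also reports whether anything was added. A minimal toy sketch of the two call shapes; every name here is a stand-in inferred from the diff below, not the kubelet's real API:

package main

import "fmt"

type markOpts struct{ volumeName string }

type toyASW struct{ known map[string]string }

// Old pattern (AddVolumeViaReconstruction): add unconditionally and
// learn nothing about whether the volume was already tracked.
func (a *toyASW) add(o markOpts) error {
    a.known[o.volumeName] = "mounted"
    return nil
}

// New pattern (CheckAndMarkVolumeAsUncertainViaReconstruction): mark
// the mount uncertain and report whether anything was added, so the
// caller can keep a count of uncertain volumes.
func (a *toyASW) checkAndMarkUncertain(o markOpts) (bool, error) {
    if _, ok := a.known[o.volumeName]; ok {
        return false, nil // already tracked; nothing marked
    }
    a.known[o.volumeName] = "uncertain"
    return true, nil
}

func main() {
    asw := &toyASW{known: map[string]string{}}
    uncertain := 0
    for _, v := range []string{"vol-a", "vol-a", "vol-b"} {
        if added, err := asw.checkAndMarkUncertain(markOpts{volumeName: v}); err == nil && added {
            uncertain++
        }
    }
    fmt.Println("uncertain volumes:", uncertain) // 2
}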
@@ -181,7 +181,6 @@ func (rc *reconciler) reconcile() {
     // After running the above operations if skippedDuringReconstruction is not empty
     // then ensure that all volumes which were discovered and skipped during reconstruction
     // are added to actualStateOfWorld in uncertain state.
-    // This should be called only ONCE after reconstruction.
     if len(rc.skippedDuringReconstruction) > 0 {
         rc.processReconstructedVolumes()
     }
@@ -265,42 +264,43 @@ func (rc *reconciler) mountAttachedVolumes(volumeToMount cache.VolumeToMount, po
     // But if mount operation fails for some reason then we still need to mark the volume as uncertain
     // and wait for the next reconciliation loop to deal with it.
 func (rc *reconciler) processReconstructedVolumes() {
     if rc.kubeClient != nil {
         rc.updateDevicePath(rc.skippedDuringReconstruction)
     }
     for volumeName, glblVolumeInfo := range rc.skippedDuringReconstruction {
         // check if volume is marked as attached to the node
         // for now lets only process volumes which are at least known as attached to the node
         // this should help with most volume types (including secret, configmap etc)
         if !rc.actualStateOfWorld.VolumeExists(volumeName) {
             klog.V(4).InfoS("Volume is not marked as attached to the node. Skipping processing of the volume", "volumeName", volumeName)
-            delete(rc.skippedDuringReconstruction, volumeName)
             continue
         }
+        uncertainVolumeCount := 0
+        // only delete volumes which were marked as attached here.
+        // This should ensure that - we will wait for volumes which were not marked as attached
+        // before adding them in uncertain state during reconstruction.
+        delete(rc.skippedDuringReconstruction, volumeName)

         for podName, volume := range glblVolumeInfo.podVolumes {
-            volumeNotMounted := rc.actualStateOfWorld.PodRemovedFromVolume(podName, volume.volumeName)
+            markVolumeOpts := operationexecutor.MarkVolumeOpts{
+                PodName:             volume.podName,
+                PodUID:              types.UID(podName),
+                VolumeName:          volume.volumeName,
+                Mounter:             volume.mounter,
+                BlockVolumeMapper:   volume.blockVolumeMapper,
+                OuterVolumeSpecName: volume.outerVolumeSpecName,
+                VolumeGidVolume:     volume.volumeGidValue,
+                VolumeSpec:          volume.volumeSpec,
+                VolumeMountState:    operationexecutor.VolumeMountUncertain,
+            }
+
+            volumeAdded, err := rc.actualStateOfWorld.CheckAndMarkVolumeAsUncertainViaReconstruction(markVolumeOpts)

             // if volume is not mounted then lets mark volume mounted in uncertain state in ASOW
-            if volumeNotMounted {
-                markVolumeOpts := operationexecutor.MarkVolumeOpts{
-                    PodName:             volume.podName,
-                    PodUID:              types.UID(volume.podName),
-                    VolumeName:          volume.volumeName,
-                    Mounter:             volume.mounter,
-                    BlockVolumeMapper:   volume.blockVolumeMapper,
-                    OuterVolumeSpecName: volume.outerVolumeSpecName,
-                    VolumeGidVolume:     volume.volumeGidValue,
-                    VolumeSpec:          volume.volumeSpec,
-                    VolumeMountState:    operationexecutor.VolumeMountUncertain,
-                }
-                err := rc.actualStateOfWorld.AddVolumeViaReconstruction(markVolumeOpts)
+            if volumeAdded {
+                uncertainVolumeCount += 1
                 if err != nil {
                     klog.ErrorS(err, "Could not add pod to volume information to actual state of world", "pod", klog.KObj(volume.pod))
                     continue
                 }
-                klog.V(4).InfoS("Volume is marked as mounted and added into the actual state", "pod", klog.KObj(volume.pod), "podName", volume.podName, "volumeName", volume.volumeName)
+                klog.V(4).InfoS("Volume is marked as mounted in uncertain state and added to the actual state", "pod", klog.KObj(volume.pod), "podName", volume.podName, "volumeName", volume.volumeName)
             }
         }

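The second half of the commit message shows up here: a volume that is not yet attached now stays in skippedDuringReconstruction so a later reconcile iteration can retry it; only attached volumes are deleted from the map as they are processed. A small self-contained model of that retention rule (names illustrative, not the reconciler's real fields):

package main

import "fmt"

func processSkipped(skipped map[string]bool, attached func(string) bool) {
    for name := range skipped {
        if !attached(name) {
            // Not attached yet: leave it in the map so a later
            // reconcile iteration can retry it.
            continue
        }
        // Attached: safe to process once and drop it from the map.
        delete(skipped, name)
        fmt.Println("marked uncertain:", name)
    }
}

func main() {
    skipped := map[string]bool{"vol-a": true, "vol-b": true}
    attachedNow := func(n string) bool { return n == "vol-a" }
    processSkipped(skipped, attachedNow)
    fmt.Println("volumes still pending:", len(skipped)) // vol-b remains: 1
}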
@@ -312,19 +312,13 @@ func (rc *reconciler) processReconstructedVolumes() {
             klog.ErrorS(err, "Could not find device mount path for volume", "volumeName", glblVolumeInfo.volumeName)
             continue
         }
-        currentMountState := rc.actualStateOfWorld.GetDeviceMountState(glblVolumeInfo.volumeName)
-        if currentMountState == operationexecutor.DeviceNotMounted {
-            err = rc.actualStateOfWorld.MarkDeviceAsUncertain(glblVolumeInfo.volumeName, glblVolumeInfo.devicePath, deviceMountPath)
-            if err != nil {
-                klog.ErrorS(err, "Could not mark device is mounted to actual state of world", "volume", glblVolumeInfo.volumeName)
-                continue
-            }
-            klog.V(4).InfoS("Volume is marked device as mounted and added into the actual state", "volumeName", glblVolumeInfo.volumeName)
+        deviceMounted := rc.actualStateOfWorld.CheckAndMarkDeviceUncertainViaReconstruction(glblVolumeInfo.volumeName, deviceMountPath)
+        if !deviceMounted {
+            klog.V(3).InfoS("Could not mark device as mounted in uncertain state", "volumeName", glblVolumeInfo.volumeName)
         }
         }
     }
 }
-    rc.skippedDuringReconstruction = make(map[v1.UniqueVolumeName]*globalVolumeInfo)
-}

 func (rc *reconciler) waitForVolumeAttach(volumeToMount cache.VolumeToMount) {
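The device-level change mirrors the pod-level one: the old get-state-then-mark pair (GetDeviceMountState plus MarkDeviceAsUncertain) becomes a single check-and-mark call returning a bool. A toy sketch of that combined semantics; the states and the method are modeled here, not the real operationexecutor API:

package main

import "fmt"

type toyDevices struct{ state map[string]string }

// checkAndMarkUncertain models the combined call: it only flips a
// device that is not already mounted, and reports success as a bool
// instead of an error the caller must classify.
func (d *toyDevices) checkAndMarkUncertain(vol, deviceMountPath string) bool {
    if s, ok := d.state[vol]; ok && s != "DeviceNotMounted" {
        return false // already globally mounted or already uncertain
    }
    d.state[vol] = "DeviceMountUncertain"
    return true
}

func main() {
    d := &toyDevices{state: map[string]string{"vol-a": "DeviceNotMounted"}}
    if !d.checkAndMarkUncertain("vol-a", "/plugins/dev/vol-a") {
        fmt.Println("could not mark device as mounted in uncertain state")
    }
    fmt.Println(d.state["vol-a"]) // DeviceMountUncertain
}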
@@ -2202,6 +2202,28 @@ func getFakeNode() *v1.Node {
     }
 }

+func getInlineFakePod(podName, podUUID, outerName, innerName string) *v1.Pod {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: podName,
+            UID:  k8stypes.UID(podUUID),
+        },
+        Spec: v1.PodSpec{
+            Volumes: []v1.Volume{
+                {
+                    Name: outerName,
+                    VolumeSource: v1.VolumeSource{
+                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
+                            PDName: innerName,
+                        },
+                    },
+                },
+            },
+        },
+    }
+    return pod
+}
+
 func getReconciler(kubeletDir string, t *testing.T, volumePaths []string) (Reconciler, *volumetesting.FakeVolumePlugin) {
     node := getFakeNode()
     volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNodeAndRoot(t, node, kubeletDir)
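getInlineFakePod is a small fixture builder: one pod, one inline GCE PD volume, with separately controllable outer (pod-facing) and inner (plugin-facing) names. A usage sketch in the style of the test runner further down; it assumes the same test package and imports:

// Sketch only; mirrors how the runner below consumes the helper.
func exampleInlinePodUsage() {
    pod := getInlineFakePod("pod1", "pod1uid", "volume-name", "volume-name")
    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]} // wrap the inline volume
    podName := util.GetUniquePodName(pod)                    // unique name derived from the pod
    _, _ = volumeSpec, podName
}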
@@ -2239,11 +2261,23 @@ func getReconciler(kubeletDir string, t *testing.T, volumePaths []string) (Recon
 }

 func TestSyncStates(t *testing.T) {
+    type podInfo struct {
+        podName         string
+        podUID          string
+        outerVolumeName string
+        innerVolumeName string
+    }
+    defaultPodInfo := podInfo{
+        podName:         "pod1",
+        podUID:          "pod1uid",
+        outerVolumeName: "volume-name",
+        innerVolumeName: "volume-name",
+    }
     tests := []struct {
         name                 string
         volumePaths          []string
         createMountPoint     bool
+        addToDSOW            bool
         podInfos             []podInfo
         postSyncStatCallback func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
         verifyFunc           func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
     }{
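The podInfo fixtures line up with the volumePaths entries: each reconstructed volume lives under a per-pod directory of the form <podUID>/volumes/<plugin>/<volumeName>. A standalone illustration of that layout, with the plugin name taken from the test fixtures:

package main

import (
    "fmt"
    "path"
)

type podInfo struct{ podUID, volumeName string }

// volumeDir builds the relative kubelet volume directory the tests
// pass via volumePaths; "fake-plugin" matches the fixtures above.
func volumeDir(p podInfo) string {
    return path.Join(p.podUID, "volumes", "fake-plugin", p.volumeName)
}

func main() {
    p := podInfo{podUID: "pod1uid", volumeName: "volume-name"}
    fmt.Println(volumeDir(p)) // pod1uid/volumes/fake-plugin/volume-name
}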
@@ -2254,6 +2288,7 @@ func TestSyncStates(t *testing.T) {
                 path.Join("pod2", "volumes", "fake-plugin", "pvc-abcdef"),
             },
             createMountPoint: true,
+            podInfos:         []podInfo{},
             verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
                 mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
                 if len(mountedPods) != 2 {
@@ -2262,12 +2297,33 @@ func TestSyncStates(t *testing.T) {
                 return nil
             },
         },
+        {
+            name: "when two pods are using same volume and one of them is deleted",
+            volumePaths: []string{
+                path.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
+                path.Join("pod2uid", "volumes", "fake-plugin", "volume-name"),
+            },
+            createMountPoint: true,
+            podInfos:         []podInfo{defaultPodInfo},
+            verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
+                // for pod that is deleted, volume is considered as mounted
+                mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
+                if len(mountedPods) != 1 {
+                    return fmt.Errorf("expected 1 pods to in asw got %d", len(mountedPods))
+                }
+                if types.UniquePodName("pod2uid") != mountedPods[0].PodName {
+                    return fmt.Errorf("expected mounted pod to be %s got %s", "pod2uid", mountedPods[0].PodName)
+                }
+                return nil
+            },
+        },
         {
             name:        "when reconstruction fails for a volume, volumes should be cleaned up",
             volumePaths: []string{
                 path.Join("pod1", "volumes", "fake-plugin", "pvc-abcdef"),
             },
             createMountPoint: false,
+            podInfos:         []podInfo{},
             verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
                 return retryWithExponentialBackOff(reconcilerSyncWaitDuration, func() (bool, error) {
                     err := volumetesting.VerifyTearDownCallCount(1, fakePlugin)
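The cleanup case polls through retryWithExponentialBackOff until teardown is observed. The helper's real definition lives elsewhere in the test file; a plausible stand-in with the same call shape (the attempt cap and doubling policy here are assumptions):

package main

import (
    "fmt"
    "time"
)

// Assumed shape only: matches the call site
// retryWithExponentialBackOff(duration, func() (bool, error)).
func retryWithExponentialBackOff(initial time.Duration, fn func() (bool, error)) error {
    backoff := initial
    for attempt := 0; attempt < 5; attempt++ { // attempt cap is a guess
        done, err := fn()
        if err == nil && done {
            return nil
        }
        time.Sleep(backoff)
        backoff *= 2 // exponential growth between polls
    }
    return fmt.Errorf("condition not met after retries")
}

func main() {
    calls := 0
    err := retryWithExponentialBackOff(time.Millisecond, func() (bool, error) {
        calls++
        return calls >= 3, nil // succeeds on the third poll
    })
    fmt.Println(err, "after", calls, "calls") // <nil> after 3 calls
}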
@@ -2284,7 +2340,7 @@ func TestSyncStates(t *testing.T) {
                 path.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
             },
             createMountPoint: true,
+            addToDSOW:        true,
             podInfos:         []podInfo{defaultPodInfo},
             postSyncStatCallback: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
                 skippedVolumes := rcInstance.skippedDuringReconstruction
                 if len(skippedVolumes) != 1 {
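The addToDSOW case asserts a two-phase flow: right after syncStates the volume must be parked in skippedDuringReconstruction (checked by postSyncStatCallback), and after the reconciler processes it the map must drain into ASW (checked by verifyFunc). A toy model of those two checkpoints, with names modeled on the diff rather than the real reconciler:

package main

import "fmt"

type rc struct{ skipped map[string]bool }

func (r *rc) syncStates()                  { r.skipped["volume-name"] = true }
func (r *rc) processReconstructedVolumes() { delete(r.skipped, "volume-name") }

func main() {
    r := &rc{skipped: map[string]bool{}}
    r.syncStates()
    fmt.Println("after sync:", len(r.skipped)) // 1, as postSyncStatCallback expects
    r.processReconstructedVolumes()
    fmt.Println("after processing:", len(r.skipped)) // 0, as verifyFunc expects
}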
@@ -2303,6 +2359,22 @@ func TestSyncStates(t *testing.T) {
                 if !addedViaReconstruction {
                     return fmt.Errorf("expected volume %s to be marked as added via reconstruction", mountedPodVolume.VolumeName)
                 }
+
+                // check device mount state
+                attachedVolumes := rcInstance.actualStateOfWorld.GetAttachedVolumes()
+                if len(attachedVolumes) != 1 {
+                    return fmt.Errorf("expected 1 volume to be unmounted, got %d", len(attachedVolumes))
+                }
+                firstAttachedVolume := attachedVolumes[0]
+                if !firstAttachedVolume.DeviceMayBeMounted() {
+                    return fmt.Errorf("expected %s volume to be mounted in uncertain state", firstAttachedVolume.VolumeName)
+                }
+
+                // also skippedVolumes map should be empty
+                skippedVolumes := rcInstance.skippedDuringReconstruction
+                if len(skippedVolumes) > 0 {
+                    return fmt.Errorf("expected 0 pods in skipped volumes found %d", len(skippedVolumes))
+                }
                 return nil
             },
         },
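DeviceMayBeMounted is the lenient assertion here: it should accept a device that is either fully mounted or mounted in uncertain state. A modeled version of that predicate; the real states and method live in operationexecutor, these are stand-ins:

package main

import "fmt"

type deviceMountState string

const (
    deviceGloballyMounted deviceMountState = "DeviceGloballyMounted"
    deviceMountUncertain  deviceMountState = "DeviceMountUncertain"
    deviceNotMounted      deviceMountState = "DeviceNotMounted"
)

type attachedVolume struct{ state deviceMountState }

// DeviceMayBeMounted is true for both mounted and uncertain states,
// mirroring how the assertion above accepts an uncertain device mount.
func (v attachedVolume) DeviceMayBeMounted() bool {
    return v.state == deviceGloballyMounted || v.state == deviceMountUncertain
}

func main() {
    v := attachedVolume{state: deviceMountUncertain}
    fmt.Println(v.DeviceMayBeMounted()) // true
}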
@@ -2315,28 +2387,6 @@ func TestSyncStates(t *testing.T) {
     }
     defer os.RemoveAll(tmpKubeletDir)

-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pod1",
-            UID:  "pod1uid",
-        },
-        Spec: v1.PodSpec{
-            Volumes: []v1.Volume{
-                {
-                    Name: "volume-name",
-                    VolumeSource: v1.VolumeSource{
-                        GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-                            PDName: "volume-name",
-                        },
-                    },
-                },
-            },
-        },
-    }
-
-    volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
-    podName := util.GetUniquePodName(pod)
-
     // create kubelet pod directory
     tmpKubeletPodDir := filepath.Join(tmpKubeletDir, "pods")
     os.MkdirAll(tmpKubeletPodDir, 0755)
@@ -2355,7 +2405,10 @@ func TestSyncStates(t *testing.T) {
     rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths)
     rcInstance, _ := rc.(*reconciler)

+    if tc.addToDSOW {
+        for _, tpodInfo := range tc.podInfos {
+            pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
+            volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
+            podName := util.GetUniquePodName(pod)
             volumeName, err := rcInstance.desiredStateOfWorld.AddPodToVolume(
                 podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
             if err != nil {
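Seeding the desired state of world before syncStates is what forces the reconstruction code down the "skip" path: volumes that are still desired are not reconstructed outright, they are parked for processReconstructedVolumes. A toy of that routing decision, illustrative only; the real logic lives in the reconciler's sync path:

package main

import "fmt"

func main() {
    desired := map[string]bool{"volume-name": true}
    onDisk := []string{"volume-name", "pvc-abcdef"}

    skipped := map[string]bool{}
    reconstructed := map[string]bool{}
    for _, v := range onDisk {
        if desired[v] {
            skipped[v] = true // left for processReconstructedVolumes
            continue
        }
        reconstructed[v] = true
    }
    fmt.Println(len(skipped), len(reconstructed)) // 1 1
}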