Remove usage of NewVolumeManagerReconstruction feature gate

This removes a lot of code related to the "old" VolumeManager reconstruction.
Jan Safranek 2024-02-22 10:06:11 +01:00
parent 513c2a3690
commit 2a2542289f
7 changed files with 2 additions and 670 deletions

View File

@@ -34,10 +34,8 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
-utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-helpers/storage/ephemeral"
-"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
@@ -324,7 +322,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
}
// Add volume to desired state of world
-uniqueVolumeName, err := dswp.desiredStateOfWorld.AddPodToVolume(
+_, err = dswp.desiredStateOfWorld.AddPodToVolume(
uniquePodName, pod, volumeSpec, podVolume.Name, volumeGidValue, seLinuxContainerContexts[podVolume.Name])
if err != nil {
klog.ErrorS(err, "Failed to add volume to desiredStateOfWorld", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "volumeSpecName", volumeSpec.Name())
@@ -333,12 +331,6 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
} else {
klog.V(4).InfoS("Added volume to desired state", "pod", klog.KObj(pod), "volumeName", podVolume.Name, "volumeSpecName", volumeSpec.Name())
}
-if !utilfeature.DefaultFeatureGate.Enabled(features.NewVolumeManagerReconstruction) {
-// sync reconstructed volume. This is necessary only when the old-style reconstruction is still used.
-// With reconstruct_new.go, ASW.MarkVolumeAsMounted will update the outer spec name of previously
-// uncertain volumes.
-dswp.actualStateOfWorld.SyncReconstructedVolume(uniqueVolumeName, uniquePodName, podVolume.Name)
-}
dswp.checkVolumeFSResize(pod, podVolume, pvc, volumeSpec, uniquePodName, mountedVolumesForPod)
}
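
Note on the hunk above: SyncReconstructedVolume existed only because old-style reconstruction recorded a reconstructed volume's outer spec name as the generated plugin-qualified name, and the populator fixed it up once the pod was re-added; with it gone, the unique volume name returned by AddPodToVolume is no longer needed here. A minimal, runnable Go sketch of that fix-up, with stand-in types (the real record lives in the volumemanager cache package):

package main

import "fmt"

// mountedVolume stands in for the kubelet's actual-state-of-world record.
type mountedVolume struct {
	OuterVolumeSpecName string
}

// syncReconstructedVolume mimics the removed call: replace the generated
// placeholder name with the name the pod's own spec uses for the volume.
func syncReconstructedVolume(v *mountedVolume, podSpecName string) {
	v.OuterVolumeSpecName = podSpecName
}

func main() {
	// As reconstructed from disk, the outer name is the generated one.
	v := mountedVolume{OuterVolumeSpecName: "fake-plugin/dswp-test-volume-name"}
	// After the populator re-adds the pod, the outer name is corrected.
	syncReconstructedVolume(&v, "dswp-test-volume-name")
	fmt.Println(v.OuterVolumeSpecName) // prints: dswp-test-volume-name
}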

View File

@@ -89,78 +89,6 @@ func prepareDswpWithVolume(t *testing.T) (*desiredStateOfWorldPopulator, kubepod
return dswp, fakePodManager, fakePodStateProvider
}
func TestFindAndAddNewPods_WithRescontructedVolume(t *testing.T) {
// Outer volume spec replacement is needed only when the old volume reconstruction is used
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, false)()
// create dswp
dswp, fakePodManager, _ := prepareDswpWithVolume(t)
// create pod
fakeOuterVolumeName := "dswp-test-volume-name"
containers := []v1.Container{
{
VolumeMounts: []v1.VolumeMount{
{
Name: fakeOuterVolumeName,
MountPath: "/mnt",
},
},
},
}
pod := createPodWithVolume("dswp-test-pod", fakeOuterVolumeName, "file-bound", containers)
fakePodManager.AddPod(pod)
podName := util.GetUniquePodName(pod)
mode := v1.PersistentVolumeFilesystem
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: fakeOuterVolumeName,
},
Spec: v1.PersistentVolumeSpec{
ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
VolumeMode: &mode,
},
}
generatedVolumeName := "fake-plugin/" + pod.Spec.Volumes[0].Name
uniqueVolumeName := v1.UniqueVolumeName(generatedVolumeName)
expectedOuterVolumeName := "dswp-test-volume-name"
opts := operationexecutor.MarkVolumeOpts{
PodName: podName,
PodUID: pod.UID,
VolumeName: uniqueVolumeName,
OuterVolumeSpecName: generatedVolumeName, // fake reconstructed volume
VolumeGidVolume: "",
VolumeSpec: volume.NewSpecFromPersistentVolume(pv, false),
VolumeMountState: operationexecutor.VolumeMounted,
}
logger, _ := ktesting.NewTestContext(t)
dswp.actualStateOfWorld.MarkVolumeAsAttached(logger, opts.VolumeName, opts.VolumeSpec, "fake-node", "")
dswp.actualStateOfWorld.MarkVolumeAsMounted(opts)
dswp.findAndAddNewPods()
mountedVolumes := dswp.actualStateOfWorld.GetMountedVolumesForPod(podName)
found := false
for _, volume := range mountedVolumes {
if volume.OuterVolumeSpecName == expectedOuterVolumeName {
found = true
break
}
}
if dswp.hasAddedPods {
t.Fatalf("HasAddedPod should be false but it is true")
}
if !found {
t.Fatalf(
"Could not found pod volume %v in the list of actual state of world volumes to mount.",
expectedOuterVolumeName)
}
}
func TestFindAndAddNewPods_WithDifferentConditions(t *testing.T) {
tests := []struct {
desc string

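The removed test above could only exercise the legacy path by pinning the gate off for its lifetime with the component-base testing helper (which, in this era, returned a restore func, hence the defer ...()). With the gate gone there is nothing left to pin. For reference, the pattern as the tree looked before this commit (test and package names here are illustrative):

package populator // package name illustrative

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestLegacyReconstruction(t *testing.T) { // hypothetical test name
	// Force the gate off for this test only; the returned func restores it.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, false)()
	// ... drive the old-style reconstruction code path ...
}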
View File

@@ -1,132 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package reconciler implements interfaces that attempt to reconcile the
// desired state of the world with the actual state of the world by triggering
// relevant actions (attach, detach, mount, unmount).
package reconciler
import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)
func (rc *reconciler) runOld(stopCh <-chan struct{}) {
wait.Until(rc.reconciliationLoopFunc(), rc.loopSleepDuration, stopCh)
}
func (rc *reconciler) reconciliationLoopFunc() func() {
return func() {
rc.reconcile()
// Sync the state with the reality once after all existing pods are added to the desired state from all sources.
// Otherwise, the reconstruct process may clean up pods' volumes that are still in use because
// desired state of world does not contain a complete list of pods.
if rc.populatorHasAddedPods() && !rc.StatesHasBeenSynced() {
klog.InfoS("Reconciler: start to sync state")
rc.sync()
}
}
}
func (rc *reconciler) reconcile() {
// Unmounts are triggered before mounts so that a volume that was
// referenced by a pod that was deleted and is now referenced by another
// pod is unmounted from the first pod before being mounted to the new
// pod.
rc.unmountVolumes()
// Next we mount required volumes. This function could also trigger
// attach if kubelet is responsible for attaching volumes.
// If underlying PVC was resized while in-use then this function also handles volume
// resizing.
rc.mountOrAttachVolumes()
// Ensure devices that should be detached/unmounted are detached/unmounted.
rc.unmountDetachDevices()
// After running the above operations if skippedDuringReconstruction is not empty
// then ensure that all volumes which were discovered and skipped during reconstruction
// are added to actualStateOfWorld in uncertain state.
if len(rc.skippedDuringReconstruction) > 0 {
rc.processReconstructedVolumes()
}
}
// processReconstructedVolumes checks volumes which were skipped during the reconstruction
// process because it was assumed that since these volumes were present in DSOW they would get
// mounted correctly and make it into ASOW.
// But if mount operation fails for some reason then we still need to mark the volume as uncertain
// and wait for the next reconciliation loop to deal with it.
func (rc *reconciler) processReconstructedVolumes() {
for volumeName, glblVolumeInfo := range rc.skippedDuringReconstruction {
// check if volume is marked as attached to the node
// for now lets only process volumes which are at least known as attached to the node
// this should help with most volume types (including secret, configmap etc)
if !rc.actualStateOfWorld.VolumeExists(volumeName) {
klog.V(4).InfoS("Volume is not marked as attached to the node. Skipping processing of the volume", "volumeName", volumeName)
continue
}
uncertainVolumeCount := 0
// only delete volumes which were marked as attached here.
// This should ensure that - we will wait for volumes which were not marked as attached
// before adding them in uncertain state during reconstruction.
delete(rc.skippedDuringReconstruction, volumeName)
for podName, volume := range glblVolumeInfo.podVolumes {
markVolumeOpts := operationexecutor.MarkVolumeOpts{
PodName: volume.podName,
PodUID: types.UID(podName),
VolumeName: volume.volumeName,
Mounter: volume.mounter,
BlockVolumeMapper: volume.blockVolumeMapper,
OuterVolumeSpecName: volume.outerVolumeSpecName,
VolumeGidVolume: volume.volumeGidValue,
VolumeSpec: volume.volumeSpec,
VolumeMountState: operationexecutor.VolumeMountUncertain,
}
volumeAdded, err := rc.actualStateOfWorld.CheckAndMarkVolumeAsUncertainViaReconstruction(markVolumeOpts)
// if volume is not mounted then lets mark volume mounted in uncertain state in ASOW
if volumeAdded {
uncertainVolumeCount += 1
if err != nil {
klog.ErrorS(err, "Could not add pod to volume information to actual state of world", "pod", klog.KObj(volume.pod))
continue
}
klog.V(4).InfoS("Volume is marked as mounted in uncertain state and added to the actual state", "pod", klog.KObj(volume.pod), "podName", volume.podName, "volumeName", volume.volumeName)
}
}
if uncertainVolumeCount > 0 {
// If the volume has device to mount, we mark its device as uncertain
if glblVolumeInfo.deviceMounter != nil || glblVolumeInfo.blockVolumeMapper != nil {
deviceMountPath, err := getDeviceMountPath(glblVolumeInfo)
if err != nil {
klog.ErrorS(err, "Could not find device mount path for volume", "volumeName", glblVolumeInfo.volumeName)
continue
}
deviceMounted := rc.actualStateOfWorld.CheckAndMarkDeviceUncertainViaReconstruction(glblVolumeInfo.volumeName, deviceMountPath)
if !deviceMounted {
klog.V(3).InfoS("Could not mark device as mounted in uncertain state", "volumeName", glblVolumeInfo.volumeName)
}
}
}
}
}
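
The deleted loop above encodes an ordering invariant that also holds in the surviving runNew path: unmount first, so a volume handed from a deleted pod to a new pod is released before it is remounted; then mount/attach; then device unmount/detach. A self-contained sketch of that loop shape using the same wait.Until driver (stub actions, not the kubelet reconciler):

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

type reconciler struct{}

func (rc *reconciler) reconcile() {
	// Unmount first: a volume moving from a deleted pod to a new pod must be
	// released before it can be mounted for the new pod.
	fmt.Println("unmountVolumes")
	// Then mount (and attach, when kubelet is responsible for attaching).
	fmt.Println("mountOrAttachVolumes")
	// Finally unmount/detach global device mounts nothing needs any more.
	fmt.Println("unmountDetachDevices")
}

func main() {
	rc := &reconciler{}
	stopCh := make(chan struct{})
	go func() { time.Sleep(300 * time.Millisecond); close(stopCh) }()
	// Same driver the deleted runOld used: run reconcile() every period until stopped.
	wait.Until(rc.reconcile, 100*time.Millisecond, stopCh)
}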

View File

@ -24,10 +24,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/util/goroutinemap/exponentialbackoff"
volumepkg "k8s.io/kubernetes/pkg/volume"
}
func (rc *reconciler) Run(stopCh <-chan struct{}) {
-if utilfeature.DefaultFeatureGate.Enabled(features.NewVolumeManagerReconstruction) {
-rc.runNew(stopCh)
-return
-}
-rc.runOld(stopCh)
+rc.runNew(stopCh)
}
func (rc *reconciler) unmountVolumes() {
func (rc *reconciler) unmountVolumes() {

View File

@@ -19,7 +19,6 @@ package reconciler
import (
"crypto/md5"
"fmt"
-"os"
"path/filepath"
"testing"
"time"
@@ -40,7 +39,6 @@ import (
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2"
-"k8s.io/klog/v2/ktesting"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@@ -2294,250 +2292,3 @@ func getReconciler(kubeletDir string, t *testing.T, volumePaths []string, kubeCl
tmpKubeletPodDir)
return rc, fakePlugin
}
func TestSyncStates(t *testing.T) {
type podInfo struct {
podName string
podUID string
outerVolumeName string
innerVolumeName string
}
defaultPodInfo := podInfo{
podName: "pod1",
podUID: "pod1uid",
outerVolumeName: "volume-name",
innerVolumeName: "volume-name",
}
tests := []struct {
name string
volumePaths []string
createMountPoint bool
podInfos []podInfo
postSyncStatCallback func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
verifyFunc func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
}{
{
name: "when two pods are using same volume and both are deleted",
volumePaths: []string{
filepath.Join("pod1", "volumes", "fake-plugin", "pvc-abcdef"),
filepath.Join("pod2", "volumes", "fake-plugin", "pvc-abcdef"),
},
createMountPoint: true,
podInfos: []podInfo{},
verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
if len(mountedPods) != 2 {
return fmt.Errorf("expected 2 pods to in asw got %d", len(mountedPods))
}
return nil
},
},
{
name: "when two pods are using same volume and one of them is deleted",
volumePaths: []string{
filepath.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
filepath.Join("pod2uid", "volumes", "fake-plugin", "volume-name"),
},
createMountPoint: true,
podInfos: []podInfo{
{
podName: "pod2",
podUID: "pod2uid",
outerVolumeName: "volume-name",
innerVolumeName: "volume-name",
},
},
verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
// for pod that is deleted, volume is considered as mounted
mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
if len(mountedPods) != 1 {
return fmt.Errorf("expected 1 pods to in asw got %d", len(mountedPods))
}
if types.UniquePodName("pod1uid") != mountedPods[0].PodName {
return fmt.Errorf("expected mounted pod to be %s got %s", "pod1uid", mountedPods[0].PodName)
}
// for pod that is in dsw, volume is in skippedDuringReconstruction
skippedVolumes := rcInstance.skippedDuringReconstruction
if len(skippedVolumes) != 1 {
return fmt.Errorf("expected 1 pods to in skippedDuringReconstruction got %d", len(skippedVolumes))
}
if skippedVolumes["fake-plugin/volume-name"] == nil {
return fmt.Errorf("expected %s is in skippedDuringReconstruction, got %+v", "fake-plugin/volume-name", skippedVolumes)
}
return nil
},
},
{
name: "when reconstruction fails for a volume, volumes should be cleaned up",
volumePaths: []string{
filepath.Join("pod1", "volumes", "fake-plugin", volumetesting.FailNewMounter),
},
createMountPoint: true,
podInfos: []podInfo{},
verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
return retryWithExponentialBackOff(reconcilerSyncWaitDuration, func() (bool, error) {
err := volumetesting.VerifyTearDownCallCount(1, fakePlugin)
if err != nil {
return false, nil
}
return true, nil
})
},
},
{
name: "when mount point does not exist, reconstruction should not fail, volumes should be added in asw",
volumePaths: []string{
filepath.Join("pod1", "volumes", "fake-plugin", "pvc-abcdef"),
},
createMountPoint: false,
podInfos: []podInfo{},
verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
if len(mountedPods) != 1 {
return fmt.Errorf("expected 1 pods to in asw got %d", len(mountedPods))
}
return nil
},
},
{
name: "when mount point does not exist, reconstruction should not fail, if volume exists in dsw, volume should be recorded in skipped during reconstruction",
volumePaths: []string{
filepath.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
},
createMountPoint: false,
podInfos: []podInfo{defaultPodInfo},
postSyncStatCallback: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
skippedVolumes := rcInstance.skippedDuringReconstruction
if len(skippedVolumes) != 1 {
return fmt.Errorf("expected 1 pods to in skippedDuringReconstruction got %d", len(skippedVolumes))
}
rcInstance.processReconstructedVolumes()
return nil
},
verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
mountedPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
if len(mountedPods) != 1 {
return fmt.Errorf("expected 1 pods to in mounted volume list got %d", len(mountedPods))
}
mountedPodVolume := mountedPods[0]
addedViaReconstruction := rcInstance.actualStateOfWorld.IsVolumeReconstructed(mountedPodVolume.VolumeName, mountedPodVolume.PodName)
if !addedViaReconstruction {
return fmt.Errorf("expected volume %s to be marked as added via reconstruction", mountedPodVolume.VolumeName)
}
// check device mount state
attachedVolumes := rcInstance.actualStateOfWorld.GetAttachedVolumes()
if len(attachedVolumes) != 1 {
return fmt.Errorf("expected 1 volume to be unmounted, got %d", len(attachedVolumes))
}
firstAttachedVolume := attachedVolumes[0]
if !firstAttachedVolume.DeviceMayBeMounted() {
return fmt.Errorf("expected %s volume to be mounted in uncertain state", firstAttachedVolume.VolumeName)
}
// also skippedVolumes map should be empty
skippedVolumes := rcInstance.skippedDuringReconstruction
if len(skippedVolumes) > 0 {
return fmt.Errorf("expected 0 pods in skipped volumes found %d", len(skippedVolumes))
}
return nil
},
},
{
name: "when volume exists in dsow, volume should be recorded in skipped during reconstruction",
volumePaths: []string{
filepath.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
},
createMountPoint: true,
podInfos: []podInfo{defaultPodInfo},
postSyncStatCallback: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
skippedVolumes := rcInstance.skippedDuringReconstruction
if len(skippedVolumes) != 1 {
return fmt.Errorf("expected 1 pods to in skippedDuringReconstruction got %d", len(skippedVolumes))
}
rcInstance.processReconstructedVolumes()
return nil
},
verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
mountedPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
if len(mountedPods) != 1 {
return fmt.Errorf("expected 1 pods to in mounted volume list got %d", len(mountedPods))
}
mountedPodVolume := mountedPods[0]
addedViaReconstruction := rcInstance.actualStateOfWorld.IsVolumeReconstructed(mountedPodVolume.VolumeName, mountedPodVolume.PodName)
if !addedViaReconstruction {
return fmt.Errorf("expected volume %s to be marked as added via reconstruction", mountedPodVolume.VolumeName)
}
// check device mount state
attachedVolumes := rcInstance.actualStateOfWorld.GetAttachedVolumes()
if len(attachedVolumes) != 1 {
return fmt.Errorf("expected 1 volume to be unmounted, got %d", len(attachedVolumes))
}
firstAttachedVolume := attachedVolumes[0]
if !firstAttachedVolume.DeviceMayBeMounted() {
return fmt.Errorf("expected %s volume to be mounted in uncertain state", firstAttachedVolume.VolumeName)
}
// also skippedVolumes map should be empty
skippedVolumes := rcInstance.skippedDuringReconstruction
if len(skippedVolumes) > 0 {
return fmt.Errorf("expected 0 pods in skipped volumes found %d", len(skippedVolumes))
}
return nil
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
tmpKubeletDir, err := os.MkdirTemp("", "")
if err != nil {
t.Fatalf("can't make a temp directory for kubeletPods: %v", err)
}
defer os.RemoveAll(tmpKubeletDir)
// create kubelet pod directory
tmpKubeletPodDir := filepath.Join(tmpKubeletDir, "pods")
os.MkdirAll(tmpKubeletPodDir, 0755)
mountPaths := []string{}
// create pod and volume directories so as reconciler can find them.
for _, volumePath := range tc.volumePaths {
vp := filepath.Join(tmpKubeletPodDir, volumePath)
if tc.createMountPoint {
mountPaths = append(mountPaths, vp)
}
os.MkdirAll(vp, 0755)
}
rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths, nil /*custom kubeclient*/)
rcInstance, _ := rc.(*reconciler)
logger, _ := ktesting.NewTestContext(t)
for _, tpodInfo := range tc.podInfos {
pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
podName := util.GetUniquePodName(pod)
volumeName, err := rcInstance.desiredStateOfWorld.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* SELinuxContext */)
if err != nil {
t.Fatalf("error adding volume %s to dsow: %v", volumeSpec.Name(), err)
}
rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
}
rcInstance.syncStates(tmpKubeletPodDir)
if tc.postSyncStatCallback != nil {
err := tc.postSyncStatCallback(rcInstance, fakePlugin)
if err != nil {
t.Errorf("test %s, postSyncStatCallback failed: %v", tc.name, err)
}
}
if err := tc.verifyFunc(rcInstance, fakePlugin); err != nil {
t.Errorf("test %s failed: %v", tc.name, err)
}
})
}
}
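
All of the deleted scenarios above work the same way: fabricate the on-disk layout that reconstruction scans, <kubelet root>/pods/<pod UID>/volumes/<plugin>/<volume name>, then run syncStates against it. A standalone sketch of that setup, mirroring the temp-dir handling in the deleted test (names are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Temp directory standing in for the kubelet root, cleaned up afterwards.
	kubeletDir, err := os.MkdirTemp("", "kubelet")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(kubeletDir)

	// <root>/pods/<podUID>/volumes/<plugin>/<volumeName> is the path shape
	// that reconstruction parses back into pod and volume identity.
	vp := filepath.Join(kubeletDir, "pods", "pod1uid", "volumes", "fake-plugin", "volume-name")
	if err := os.MkdirAll(vp, 0755); err != nil {
		panic(err)
	}
	fmt.Println(vp) // a candidate mount point for syncStates to discover
}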

View File

@@ -1,192 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package reconciler
import (
"context"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/volume/util/nestedpendingoperations"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
)
// sync process tries to observe the real world by scanning all pods' volume directories from the disk.
// If the actual and desired state of worlds are not consistent with the observed world, it means that some
// mounted volumes are left out probably during kubelet restart. This process will reconstruct
// the volumes and update the actual and desired states. For the volumes that cannot support reconstruction,
// it will try to clean up the mount paths with operation executor.
func (rc *reconciler) sync() {
defer rc.updateLastSyncTime()
rc.syncStates(rc.kubeletPodsDir)
}
// syncStates scans the volume directories under the given pod directory.
// If the volume is not in desired state of world, this function will reconstruct
// the volume related information and put it in both the actual and desired state of worlds.
// For some volume plugins that cannot support reconstruction, it will clean up the existing
mount points since the volume is no longer needed (removed from desired state)
func (rc *reconciler) syncStates(kubeletPodDir string) {
// Get volumes information by reading the pod's directory
podVolumes, err := getVolumesFromPodDir(kubeletPodDir)
if err != nil {
klog.ErrorS(err, "Cannot get volumes from disk, skip sync states for volume reconstruction")
return
}
volumesNeedUpdate := make(map[v1.UniqueVolumeName]*globalVolumeInfo)
volumeNeedReport := []v1.UniqueVolumeName{}
for _, volume := range podVolumes {
if rc.actualStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName) {
klog.V(4).InfoS("Volume exists in actual state, skip cleaning up mounts", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
// There is nothing to reconstruct
continue
}
volumeInDSW := rc.desiredStateOfWorld.VolumeExistsWithSpecName(volume.podName, volume.volumeSpecName)
reconstructedVolume, err := rc.reconstructVolume(volume)
if err != nil {
if volumeInDSW {
// Some pod needs the volume, don't clean it up and hope that
// reconcile() calls SetUp and reconstructs the volume in ASW.
klog.V(4).InfoS("Volume exists in desired state, skip cleaning up mounts", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
continue
}
// No pod needs the volume.
klog.InfoS("Could not construct volume information, cleaning up mounts", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName, "err", err)
rc.cleanupMounts(volume)
continue
}
gvl := &globalVolumeInfo{
volumeName: reconstructedVolume.volumeName,
volumeSpec: reconstructedVolume.volumeSpec,
devicePath: reconstructedVolume.devicePath,
deviceMounter: reconstructedVolume.deviceMounter,
blockVolumeMapper: reconstructedVolume.blockVolumeMapper,
mounter: reconstructedVolume.mounter,
}
if volumeInDSW {
// Some pod needs the volume. And it exists on disk. Some previous
// kubelet must have created the directory, therefore it must have
// reported the volume as in use. Mark the volume as in use also in
// this new kubelet so reconcile() calls SetUp and re-mounts the
// volume if it's necessary.
volumeNeedReport = append(volumeNeedReport, reconstructedVolume.volumeName)
if cachedInfo, ok := rc.skippedDuringReconstruction[reconstructedVolume.volumeName]; ok {
gvl = cachedInfo
}
gvl.addPodVolume(reconstructedVolume)
rc.skippedDuringReconstruction[reconstructedVolume.volumeName] = gvl
klog.V(4).InfoS("Volume exists in desired state, marking as InUse", "podName", volume.podName, "volumeSpecName", volume.volumeSpecName)
continue
}
// There is no pod that uses the volume.
if rc.operationExecutor.IsOperationPending(reconstructedVolume.volumeName, nestedpendingoperations.EmptyUniquePodName, nestedpendingoperations.EmptyNodeName) {
klog.InfoS("Volume is in pending operation, skip cleaning up mounts")
}
klog.V(2).InfoS("Reconciler sync states: could not find pod information in desired state, update it in actual state", "reconstructedVolume", reconstructedVolume)
if cachedInfo, ok := volumesNeedUpdate[reconstructedVolume.volumeName]; ok {
gvl = cachedInfo
}
gvl.addPodVolume(reconstructedVolume)
volumesNeedUpdate[reconstructedVolume.volumeName] = gvl
}
if len(volumesNeedUpdate) > 0 {
if err = rc.updateStates(volumesNeedUpdate); err != nil {
klog.ErrorS(err, "Error occurred during reconstruct volume from disk")
}
}
if len(volumeNeedReport) > 0 {
rc.desiredStateOfWorld.MarkVolumesReportedInUse(volumeNeedReport)
}
}
// updateDevicePath gets the node status to retrieve volume device path information.
func (rc *reconciler) updateDevicePath(volumesNeedUpdate map[v1.UniqueVolumeName]*globalVolumeInfo) {
node, fetchErr := rc.kubeClient.CoreV1().Nodes().Get(context.TODO(), string(rc.nodeName), metav1.GetOptions{})
if fetchErr != nil {
klog.ErrorS(fetchErr, "UpdateStates in reconciler: could not get node status with error")
} else {
for _, attachedVolume := range node.Status.VolumesAttached {
if volume, exists := volumesNeedUpdate[attachedVolume.Name]; exists {
volume.devicePath = attachedVolume.DevicePath
volumesNeedUpdate[attachedVolume.Name] = volume
klog.V(4).InfoS("Update devicePath from node status for volume", "volumeName", attachedVolume.Name, "path", volume.devicePath)
}
}
}
}
func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*globalVolumeInfo) error {
// Get the node status to retrieve volume device path information.
// Skip reporting devicePath in node objects if kubeClient is nil.
// In standalone mode, kubelet is not expected to mount any attachable volume types or secret, configmaps etc.
if rc.kubeClient != nil {
rc.updateDevicePath(volumesNeedUpdate)
}
for _, gvl := range volumesNeedUpdate {
err := rc.actualStateOfWorld.MarkVolumeAsAttached(
//TODO: the devicePath might not be correct for some volume plugins: see issue #54108
klog.TODO(), gvl.volumeName, gvl.volumeSpec, rc.nodeName, gvl.devicePath)
if err != nil {
klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName)
continue
}
for _, volume := range gvl.podVolumes {
err = rc.markVolumeState(volume, operationexecutor.VolumeMounted)
if err != nil {
klog.ErrorS(err, "Could not add pod to volume information to actual state of world", "pod", klog.KObj(volume.pod))
continue
}
klog.V(2).InfoS("Volume is marked as mounted and added into the actual state", "pod", klog.KObj(volume.pod), "podName", volume.podName, "volumeName", volume.volumeName)
}
// If the volume has device to mount, we mark its device as mounted.
if gvl.deviceMounter != nil || gvl.blockVolumeMapper != nil {
deviceMountPath, err := getDeviceMountPath(gvl)
if err != nil {
klog.ErrorS(err, "Could not find device mount path for volume", "volumeName", gvl.volumeName)
continue
}
err = rc.actualStateOfWorld.MarkDeviceAsMounted(gvl.volumeName, gvl.devicePath, deviceMountPath, "")
if err != nil {
klog.ErrorS(err, "Could not mark device is mounted to actual state of world", "volume", gvl.volumeName)
continue
}
klog.V(2).InfoS("Volume is marked device as mounted and added into the actual state", "volumeName", gvl.volumeName)
}
}
return nil
}
func (rc *reconciler) markVolumeState(volume *reconstructedVolume, volumeState operationexecutor.VolumeMountState) error {
markVolumeOpts := operationexecutor.MarkVolumeOpts{
PodName: volume.podName,
PodUID: types.UID(volume.podName),
VolumeName: volume.volumeName,
Mounter: volume.mounter,
BlockVolumeMapper: volume.blockVolumeMapper,
OuterVolumeSpecName: volume.outerVolumeSpecName,
VolumeGidVolume: volume.volumeGidValue,
VolumeSpec: volume.volumeSpec,
VolumeMountState: volumeState,
}
err := rc.actualStateOfWorld.MarkVolumeAsMounted(markVolumeOpts)
return err
}
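
Stripped of bookkeeping, the deleted syncStates applies one decision per volume found on disk: already in the actual state, skip; reconstruction failed but the volume is still desired, leave it for reconcile() to SetUp; reconstruction failed and it is orphaned, clean up the mount; reconstructed and desired, report it in use and park it for processReconstructedVolumes; reconstructed and orphaned, add it to the actual state via updateStates. A condensed, runnable rendering of that decision table (stand-in signature, not the real method):

package main

import (
	"errors"
	"fmt"
)

// syncOne condenses the per-volume branching of the deleted syncStates.
func syncOne(inASW, inDSW bool, reconstructErr error) string {
	switch {
	case inASW:
		return "skip: already tracked in the actual state of world"
	case reconstructErr != nil && inDSW:
		return "skip: some pod still wants it; reconcile() will SetUp the volume"
	case reconstructErr != nil:
		return "cleanupMounts: orphaned and not reconstructable"
	case inDSW:
		return "mark reported-in-use and park for processReconstructedVolumes"
	default:
		return "updateStates: mark attached/mounted in the actual state of world"
	}
}

func main() {
	fmt.Println(syncOne(false, true, errors.New("no mount point")))
	fmt.Println(syncOne(false, false, nil))
	fmt.Println(syncOne(false, true, nil))
}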

View File

@@ -26,18 +26,13 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
-utilfeature "k8s.io/apiserver/pkg/util/feature"
-featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2/ktesting"
-"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
)
func TestReconstructVolumes(t *testing.T) {
-defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, true)()
tests := []struct {
name string
volumePaths []string
@@ -148,8 +143,6 @@ func TestReconstructVolumes(t *testing.T) {
}
func TestCleanOrphanVolumes(t *testing.T) {
-defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, true)()
type podInfo struct {
podName string
podUID string
@@ -263,7 +256,6 @@ func TestReconstructVolumesMount(t *testing.T) {
// Since the volume is reconstructed, it must be marked as uncertain
// even after a final SetUp error, see https://github.com/kubernetes/kubernetes/issues/96635
// and https://github.com/kubernetes/kubernetes/pull/110670.
-defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, true)()
tests := []struct {
name string