Merge pull request #81429 from huffmanca/resize_block_volume

Enables resizing of block volumes.
Kubernetes Prow Robot 2019-08-23 17:59:05 -07:00 committed by GitHub
commit f105fef3d5
13 changed files with 508 additions and 330 deletions


@ -11,7 +11,6 @@ go_library(
srcs = ["desired_state_of_world_populator.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubelet/config:go_default_library",
"//pkg/kubelet/container:go_default_library",


@ -35,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/config"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
@ -384,22 +383,15 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
// or online resize in subsequent loop(after we confirm it has been mounted).
return
}
fsVolume, err := util.CheckVolumeModeFilesystem(volumeSpec)
if err != nil {
klog.Errorf("Check volume mode failed for volume %s(OuterVolumeSpecName %s): %v",
uniqueVolumeName, podVolume.Name, err)
return
}
if !fsVolume {
klog.V(5).Infof("Block mode volume needn't to check file system resize request")
return
}
if processedVolumesForFSResize.Has(string(uniqueVolumeName)) {
// File system resize operation is a global operation for volume,
// so we only need to check it once if more than one pod use it.
return
}
if mountedReadOnlyByPod(podVolume, pod) {
// volumeSpec.ReadOnly is the value that determines if volume could be formatted when being mounted.
// This is the same flag that determines filesystem resizing behaviour for offline resizing and hence
// we should use it here. This value comes from Pod.spec.volumes.persistentVolumeClaim.readOnly.
if volumeSpec.ReadOnly {
// This volume is used as read only by this pod, we don't perform resize for read only volumes.
klog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+
"as the volume is mounted as readonly", podVolume.Name, pod.Namespace, pod.Name)
@ -411,28 +403,6 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
processedVolumesForFSResize.Insert(string(uniqueVolumeName))
}
func mountedReadOnlyByPod(podVolume v1.Volume, pod *v1.Pod) bool {
if podVolume.PersistentVolumeClaim.ReadOnly {
return true
}
return podutil.VisitContainers(&pod.Spec, func(c *v1.Container) bool {
if !mountedReadOnlyByContainer(podVolume.Name, c) {
return false
}
return true
})
}
func mountedReadOnlyByContainer(volumeName string, container *v1.Container) bool {
for _, volumeMount := range container.VolumeMounts {
if volumeMount.Name == volumeName && !volumeMount.ReadOnly {
return false
}
}
return true
}
func getUniqueVolumeName(
podName volumetypes.UniquePodName,
outerVolumeSpecName string,
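Reviewer note: the net effect in the populator is that block-mode volumes are no longer skipped here, and the read-only decision now comes from volumeSpec.ReadOnly instead of walking each container's VolumeMounts. A minimal standalone sketch of that decision follows; the types and the processed map are hypothetical stand-ins, not code from this commit.

package main

import "fmt"

// volumeSpec is a hypothetical stand-in for the kubelet's volume spec; ReadOnly
// corresponds to Pod.spec.volumes.persistentVolumeClaim.readOnly.
type volumeSpec struct {
	Name     string
	ReadOnly bool
}

// shouldCheckResize mirrors the new flow: filesystem and block volumes are both
// considered, read-only volumes are skipped, and each volume is checked only once.
func shouldCheckResize(spec volumeSpec, processed map[string]bool) bool {
	if processed[spec.Name] {
		return false // resize is a per-volume operation, one check is enough
	}
	if spec.ReadOnly {
		return false // read-only volumes are never resized
	}
	processed[spec.Name] = true
	return true
}

func main() {
	processed := map[string]bool{}
	fmt.Println(shouldCheckResize(volumeSpec{Name: "data"}, processed)) // true
	fmt.Println(shouldCheckResize(volumeSpec{Name: "data"}, processed)) // false, already processed
}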


@ -568,8 +568,6 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
}
func TestCheckVolumeFSResize(t *testing.T) {
mode := v1.PersistentVolumeFilesystem
setCapacity := func(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, capacity int) {
pv.Spec.Capacity = volumeCapacity(capacity)
pvc.Spec.Resources.Requests = volumeCapacity(capacity)
@ -580,6 +578,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
verify func(*testing.T, []v1.UniqueVolumeName, v1.UniqueVolumeName)
enableResize bool
readOnlyVol bool
volumeMode v1.PersistentVolumeMode
}{
{
// No resize request for volume, volumes in ASW shouldn't be marked as fsResizeRequired
@ -591,6 +590,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
}
},
enableResize: true,
volumeMode: v1.PersistentVolumeFilesystem,
},
{
// Disable the feature gate, so volume shouldn't be marked as fsResizeRequired
@ -603,6 +603,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
}
},
enableResize: false,
volumeMode: v1.PersistentVolumeFilesystem,
},
{
// Make volume used as ReadOnly, so volume shouldn't be marked as fsResizeRequired
@ -616,6 +617,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
},
readOnlyVol: true,
enableResize: true,
volumeMode: v1.PersistentVolumeFilesystem,
},
{
// Clear ASW, so volume shouldn't be marked as fsResizeRequired because they are not mounted
@ -629,6 +631,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
}
},
enableResize: true,
volumeMode: v1.PersistentVolumeFilesystem,
},
{
// volume in ASW should be marked as fsResizeRequired
@ -647,6 +650,26 @@ func TestCheckVolumeFSResize(t *testing.T) {
}
},
enableResize: true,
volumeMode: v1.PersistentVolumeFilesystem,
},
{
// volume in ASW should be marked as fsResizeRequired
resize: func(_ *testing.T, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, _ *desiredStateOfWorldPopulator) {
setCapacity(pv, pvc, 2)
},
verify: func(t *testing.T, vols []v1.UniqueVolumeName, volName v1.UniqueVolumeName) {
if len(vols) == 0 {
t.Fatalf("Request resize for volume, but volume in ASW hasn't been marked as fsResizeRequired")
}
if len(vols) != 1 {
t.Errorf("Some unexpected volumes are marked as fsResizeRequired: %v", vols)
}
if vols[0] != volName {
t.Fatalf("Mark wrong volume as fsResizeRequired: %s", vols[0])
}
},
enableResize: true,
volumeMode: v1.PersistentVolumeBlock,
},
}
@ -659,7 +682,7 @@ func TestCheckVolumeFSResize(t *testing.T) {
PersistentVolumeSource: v1.PersistentVolumeSource{RBD: &v1.RBDPersistentVolumeSource{}},
Capacity: volumeCapacity(1),
ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "file-bound"},
VolumeMode: &mode,
VolumeMode: &tc.volumeMode,
},
}
pvc := &v1.PersistentVolumeClaim{
@ -677,10 +700,10 @@ func TestCheckVolumeFSResize(t *testing.T) {
dswp, fakePodManager, fakeDSW := createDswpWithVolume(t, pv, pvc)
fakeASW := dswp.actualStateOfWorld
containers := []v1.Container{}
// create pod
containers := []v1.Container{
{
if tc.volumeMode == v1.PersistentVolumeFilesystem {
containers = append(containers, v1.Container{
VolumeMounts: []v1.VolumeMount{
{
Name: pv.Name,
@ -688,9 +711,20 @@ func TestCheckVolumeFSResize(t *testing.T) {
ReadOnly: tc.readOnlyVol,
},
},
},
})
} else {
containers = append(containers, v1.Container{
VolumeDevices: []v1.VolumeDevice{
{
Name: pv.Name,
DevicePath: "/mnt/foobar",
},
},
})
}
pod := createPodWithVolume("dswp-test-pod", "dswp-test-volume-name", "file-bound", containers)
pod.Spec.Volumes[0].VolumeSource.PersistentVolumeClaim.ReadOnly = tc.readOnlyVol
uniquePodName := types.UniquePodName(pod.UID)
uniqueVolumeName := v1.UniqueVolumeName("fake-plugin/" + pod.Spec.Volumes[0].Name)
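The new block-mode test case consumes the PVC through volumeDevices rather than volumeMounts, which is what distinguishes a raw block pod from a filesystem pod. A small sketch of that distinction, using the real core/v1 types but hypothetical mount and device paths:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// buildContainer shows the branch the test takes: a filesystem-mode PVC is consumed
// through volumeMounts, while a block-mode PVC is consumed through volumeDevices.
func buildContainer(volName string, mode v1.PersistentVolumeMode) v1.Container {
	if mode == v1.PersistentVolumeFilesystem {
		return v1.Container{
			VolumeMounts: []v1.VolumeMount{{Name: volName, MountPath: "/mnt/data"}},
		}
	}
	return v1.Container{
		VolumeDevices: []v1.VolumeDevice{{Name: volName, DevicePath: "/dev/xvda"}},
	}
}

func main() {
	fmt.Printf("%+v\n", buildContainer("dswp-test-volume-name", v1.PersistentVolumeBlock))
}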


@ -442,11 +442,47 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
// Enable BlockVolume feature gate
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
Namespace: "ns",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
},
}
gcepvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "volume-name",
VolumeMode: &mode,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
Capacity: gcepv.Spec.Capacity,
},
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
fakeRecorder := &record.FakeRecorder{}
fakeHandler := volumetesting.NewBlockVolumePathHandler()
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@ -469,27 +505,6 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
&mount.FakeHostUtil{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
},
}
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
@ -527,11 +542,53 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
// Enable BlockVolume feature gate
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
Namespace: "ns",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
},
}
gcepvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "volume-name",
VolumeMode: &mode,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
Capacity: gcepv.Spec.Capacity,
},
}
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
Name: "fake-plugin/fake-device1",
DevicePath: "/fake/path",
})
fakeRecorder := &record.FakeRecorder{}
fakeHandler := volumetesting.NewBlockVolumePathHandler()
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@ -554,31 +611,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
&mount.FakeHostUtil{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
},
}
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -613,11 +646,50 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
// Enable BlockVolume feature gate
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
Namespace: "ns",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
},
}
gcepvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "volume-name",
VolumeMode: &mode,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
Capacity: gcepv.Spec.Capacity,
},
}
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
fakeRecorder := &record.FakeRecorder{}
fakeHandler := volumetesting.NewBlockVolumePathHandler()
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@ -640,31 +712,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
&mount.FakeHostUtil{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
},
}
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -709,11 +757,53 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
// Enable BlockVolume feature gate
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
Namespace: "ns",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
},
}
gcepvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "volume-name",
VolumeMode: &mode,
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
Capacity: gcepv.Spec.Capacity,
},
}
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
// Arrange
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createTestClient()
kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
Name: "fake-plugin/fake-device1",
DevicePath: "/fake/path",
})
fakeRecorder := &record.FakeRecorder{}
fakeHandler := volumetesting.NewBlockVolumePathHandler()
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@ -736,31 +826,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
&mount.FakeHostUtil{},
volumePluginMgr,
kubeletPodsDir)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{},
}
mode := v1.PersistentVolumeBlock
gcepv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
Spec: v1.PersistentVolumeSpec{
Capacity: v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
},
VolumeMode: &mode,
},
}
volumeSpec := &volume.Spec{
PersistentVolume: gcepv,
}
podName := util.GetUniquePodName(pod)
generatedVolumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@ -948,113 +1014,132 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
// Verifies volume's fsResizeRequired flag is cleared later.
func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandInUsePersistentVolumes, true)()
blockMode := v1.PersistentVolumeBlock
fsMode := v1.PersistentVolumeFilesystem
fs := v1.PersistentVolumeFilesystem
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv",
UID: "pvuid",
var tests = []struct {
name string
volumeMode *v1.PersistentVolumeMode
}{
{
name: "expand-fs-volume",
volumeMode: &fsMode,
},
Spec: v1.PersistentVolumeSpec{
ClaimRef: &v1.ObjectReference{Name: "pvc"},
VolumeMode: &fs,
{
name: "expand-raw-block",
volumeMode: &blockMode,
},
}
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc",
UID: "pvcuid",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pv",
VolumeMode: &fs,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pv",
UID: "pvuid",
},
Spec: v1.PersistentVolumeSpec{
ClaimRef: &v1.ObjectReference{Name: "pvc"},
VolumeMode: tc.volumeMode,
},
}
pvc := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "pvc",
UID: "pvcuid",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pv",
VolumeMode: tc.volumeMode,
},
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
UID: "pod1uid",
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "volume-name",
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
},
},
},
},
},
}
}
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(pv, pvc)
fakeRecorder := &record.FakeRecorder{}
fakeHandler := volumetesting.NewBlockVolumePathHandler()
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
kubeClient,
volumePluginMgr,
fakeRecorder,
false, /* checkNodeCapabilitiesBeforeMount */
fakeHandler))
volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
kubeClient := createtestClientWithPVPVC(pv, pvc)
fakeRecorder := &record.FakeRecorder{}
fakeHandler := volumetesting.NewBlockVolumePathHandler()
oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
kubeClient,
volumePluginMgr,
fakeRecorder,
false, /* checkNodeCapabilitiesBeforeMount */
fakeHandler))
reconciler := NewReconciler(
kubeClient,
true, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
waitForAttachTimeout,
nodeName,
dsw,
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
&mount.FakeHostUtil{},
volumePluginMgr,
kubeletPodsDir)
reconciler := NewReconciler(
kubeClient,
true, /* controllerAttachDetachEnabled */
reconcilerLoopSleepDuration,
waitForAttachTimeout,
nodeName,
dsw,
asw,
hasAddedPods,
oex,
&mount.FakeMounter{},
&mount.FakeHostUtil{},
volumePluginMgr,
kubeletPodsDir)
volumeSpec := &volume.Spec{PersistentVolume: pv}
podName := util.GetUniquePodName(pod)
volumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
volumeSpec := &volume.Spec{PersistentVolume: pv}
podName := util.GetUniquePodName(pod)
volumeName, err := dsw.AddPodToVolume(
podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
// Assert
if err != nil {
t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
}
dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
// Start the reconciler to fill ASW.
stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
go func() {
reconciler.Run(stopChan)
close(stoppedChan)
}()
waitForMount(t, fakePlugin, volumeName, asw)
// Stop the reconciler.
close(stopChan)
<-stoppedChan
// Start the reconciler to fill ASW.
stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
go func() {
reconciler.Run(stopChan)
close(stoppedChan)
}()
waitForMount(t, fakePlugin, volumeName, asw)
// Stop the reconciler.
close(stopChan)
<-stoppedChan
// Mark volume as fsResizeRequired.
asw.MarkFSResizeRequired(volumeName, podName)
_, _, podExistErr := asw.PodExistsInVolume(podName, volumeName)
if !cache.IsFSResizeRequiredError(podExistErr) {
t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr)
}
// Mark volume as fsResizeRequired.
asw.MarkFSResizeRequired(volumeName, podName)
_, _, podExistErr := asw.PodExistsInVolume(podName, volumeName)
if !cache.IsFSResizeRequiredError(podExistErr) {
t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr)
}
// Start the reconciler again, we hope reconciler will perform the
// resize operation and clear the fsResizeRequired flag for volume.
go reconciler.Run(wait.NeverStop)
// Start the reconciler again, we hope reconciler will perform the
// resize operation and clear the fsResizeRequired flag for volume.
go reconciler.Run(wait.NeverStop)
waitErr := retryWithExponentialBackOff(500*time.Millisecond, func() (done bool, err error) {
mounted, _, err := asw.PodExistsInVolume(podName, volumeName)
return mounted && err == nil, nil
})
if waitErr != nil {
t.Fatal("Volume resize should succeeded")
waitErr := retryWithExponentialBackOff(500*time.Millisecond, func() (done bool, err error) {
mounted, _, err := asw.PodExistsInVolume(podName, volumeName)
return mounted && err == nil, nil
})
if waitErr != nil {
t.Fatal("Volume resize should succeeded")
}
})
}
}
@ -1138,19 +1223,21 @@ func runReconciler(reconciler Reconciler) {
go reconciler.Run(wait.NeverStop)
}
func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) *fake.Clientset {
func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, attachedVolumes ...v1.AttachedVolume) *fake.Clientset {
fakeClient := &fake.Clientset{}
if len(attachedVolumes) == 0 {
attachedVolumes = append(attachedVolumes, v1.AttachedVolume{
Name: "fake-plugin/pv",
DevicePath: "fake/path",
})
}
fakeClient.AddReactor("get", "nodes",
func(action core.Action) (bool, runtime.Object, error) {
return true, &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: string(nodeName)},
Status: v1.NodeStatus{
VolumesAttached: []v1.AttachedVolume{
{
Name: "fake-plugin/pv",
DevicePath: "fake/path",
},
}},
VolumesAttached: attachedVolumes,
},
}, nil
})
fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
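The helper now takes the attached volumes as a variadic parameter, so the block-volume tests can report fake-plugin/fake-device1 as already attached while existing callers keep the old default. A tiny sketch of that variadic-with-default pattern, with local types rather than the test's own:

package main

import "fmt"

type attachedVolume struct {
	Name       string
	DevicePath string
}

// nodeAttachedVolumes mirrors the helper's pattern: callers pass the attached volumes
// they need, and a default entry is filled in when they pass none.
func nodeAttachedVolumes(attached ...attachedVolume) []attachedVolume {
	if len(attached) == 0 {
		attached = append(attached, attachedVolume{Name: "fake-plugin/pv", DevicePath: "fake/path"})
	}
	return attached
}

func main() {
	fmt.Println(nodeAttachedVolumes())
	fmt.Println(nodeAttachedVolumes(attachedVolume{Name: "fake-plugin/fake-device1", DevicePath: "/fake/path"}))
}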


@ -326,7 +326,15 @@ func (plugin *awsElasticBlockStorePlugin) ExpandVolumeDevice(
}
func (plugin *awsElasticBlockStorePlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
if err != nil {
return false, fmt.Errorf("error checking VolumeMode: %v", err)
}
// if volume is not a fs file system, there is nothing for us to do here.
if !fsVolume {
return true, nil
}
_, err = util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
if err != nil {
return false, err
}
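This NodeExpand change is the same in every in-tree plugin touched below (Azure disk, Cinder, FlexVolume, GCE PD, RBD): a raw block volume has no filesystem to grow on the node, so the plugin reports success without calling GenericResizeFS. A standalone sketch of the shared pattern, with hypothetical local types in place of the volume package:

package main

import (
	"errors"
	"fmt"
)

// persistentVolumeMode is a local stand-in for v1.PersistentVolumeMode.
type persistentVolumeMode string

const (
	filesystemMode persistentVolumeMode = "Filesystem"
	blockMode      persistentVolumeMode = "Block"
)

// nodeExpand sketches the pattern: block volumes short-circuit with success,
// filesystem volumes fall through to the actual filesystem resize.
func nodeExpand(mode persistentVolumeMode, resizeFS func() error) (bool, error) {
	if mode != filesystemMode {
		return true, nil // nothing to do node-side for a raw block device
	}
	if err := resizeFS(); err != nil {
		return false, fmt.Errorf("resizing filesystem: %w", err)
	}
	return true, nil
}

func main() {
	done, err := nodeExpand(blockMode, func() error { return errors.New("must not be called") })
	fmt.Println(done, err) // true <nil>
}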


@ -300,7 +300,15 @@ func (plugin *azureDataDiskPlugin) ExpandVolumeDevice(
}
func (plugin *azureDataDiskPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
if err != nil {
return false, fmt.Errorf("error checking VolumeMode: %v", err)
}
// if volume is not a fs file system, there is nothing for us to do here.
if !fsVolume {
return true, nil
}
_, err = util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
if err != nil {
return false, err
}


@ -313,7 +313,16 @@ func (plugin *cinderPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resour
}
func (plugin *cinderPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
if err != nil {
return false, fmt.Errorf("error checking VolumeMode: %v", err)
}
// if volume is not a fs file system, there is nothing for us to do here.
if !fsVolume {
return true, nil
}
_, err = util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
if err != nil {
return false, err
}


@ -26,6 +26,7 @@ import (
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
var _ volume.NodeExpandableVolumePlugin = &csiPlugin{}
@ -52,14 +53,19 @@ func (c *csiPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, er
if err != nil {
return false, err
}
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
if err != nil {
return false, errors.New(log("Expander.NodeExpand failed to check VolumeMode of source: %v", err))
}
return c.nodeExpandWithClient(resizeOptions, csiSource, csClient)
return c.nodeExpandWithClient(resizeOptions, csiSource, csClient, fsVolume)
}
func (c *csiPlugin) nodeExpandWithClient(
resizeOptions volume.NodeResizeOptions,
csiSource *api.CSIPersistentVolumeSource,
csClient csiClient) (bool, error) {
csClient csiClient,
fsVolume bool) (bool, error) {
driverName := csiSource.Driver
ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
@ -88,7 +94,12 @@ func (c *csiPlugin) nodeExpandWithClient(
return false, nil
}
_, err = csClient.NodeExpandVolume(ctx, csiSource.VolumeHandle, resizeOptions.DeviceMountPath, resizeOptions.NewSize)
volumeTargetPath := resizeOptions.DeviceMountPath
if !fsVolume {
volumeTargetPath = resizeOptions.DevicePath
}
_, err = csClient.NodeExpandVolume(ctx, csiSource.VolumeHandle, volumeTargetPath, resizeOptions.NewSize)
if err != nil {
return false, fmt.Errorf("Expander.NodeExpand failed to expand the volume : %v", err)
}
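For CSI the block case is not a no-op: NodeExpandVolume is still issued, but against the published device path rather than a mount path, since a raw block volume has no mount point. A minimal sketch of that path selection, with a hypothetical struct standing in for volume.NodeResizeOptions and paths taken from the test below:

package main

import "fmt"

// resizeOptions is a hypothetical stand-in for volume.NodeResizeOptions.
type resizeOptions struct {
	DeviceMountPath string // where a filesystem volume is staged or mounted
	DevicePath      string // the raw block device path published for the pod
}

// expandTargetPath picks the path handed to the CSI NodeExpandVolume call.
func expandTargetPath(opts resizeOptions, fsVolume bool) string {
	if fsVolume {
		return opts.DeviceMountPath
	}
	return opts.DevicePath
}

func main() {
	opts := resizeOptions{DeviceMountPath: "/foo/bar", DevicePath: "/mnt/foobar"}
	fmt.Println(expandTargetPath(opts, true))  // /foo/bar
	fmt.Println(expandTargetPath(opts, false)) // /mnt/foobar
}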


@ -31,6 +31,7 @@ func TestNodeExpand(t *testing.T) {
nodeStageSet bool
volumePhase volume.CSIVolumePhaseType
success bool
fsVolume bool
}{
{
name: "when node expansion is not set",
@ -42,12 +43,14 @@ func TestNodeExpand(t *testing.T) {
nodeStageSet: true,
volumePhase: volume.CSIVolumeStaged,
success: true,
fsVolume: true,
},
{
name: "when nodeExpansion=on, nodeStage=off, volumePhase=staged",
nodeExpansion: true,
volumePhase: volume.CSIVolumeStaged,
success: false,
fsVolume: true,
},
{
name: "when nodeExpansion=on, nodeStage=on, volumePhase=published",
@ -55,12 +58,21 @@ func TestNodeExpand(t *testing.T) {
nodeStageSet: true,
volumePhase: volume.CSIVolumePublished,
success: true,
fsVolume: true,
},
{
name: "when nodeExpansion=on, nodeStage=off, volumePhase=published",
nodeExpansion: true,
volumePhase: volume.CSIVolumePublished,
success: true,
fsVolume: true,
},
{
name: "when nodeExpansion=on, nodeStage=off, volumePhase=published, fsVolume=false",
nodeExpansion: true,
volumePhase: volume.CSIVolumePublished,
success: true,
fsVolume: false,
},
}
for _, tc := range tests {
@ -76,13 +88,14 @@ func TestNodeExpand(t *testing.T) {
VolumeSpec: spec,
NewSize: newSize,
DeviceMountPath: "/foo/bar",
DevicePath: "/mnt/foobar",
CSIVolumePhase: tc.volumePhase,
}
csiSource, _ := getCSISourceFromSpec(resizeOptions.VolumeSpec)
csClient := setupClientWithExpansion(t, tc.nodeStageSet, tc.nodeExpansion)
ok, err := plug.nodeExpandWithClient(resizeOptions, csiSource, csClient)
ok, err := plug.nodeExpandWithClient(resizeOptions, csiSource, csClient, tc.fsVolume)
if ok != tc.success {
if err != nil {
t.Errorf("For %s : expected %v got %v with %v", tc.name, tc.success, ok, err)


@ -17,6 +17,8 @@ limitations under the License.
package flexvolume
import (
"fmt"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/volume"
@ -40,7 +42,16 @@ func (e *expanderDefaults) ExpandVolumeDevice(spec *volume.Spec, newSize resourc
// generic filesystem resize
func (e *expanderDefaults) NodeExpand(rsOpt volume.NodeResizeOptions) (bool, error) {
klog.Warning(logPrefix(e.plugin), "using default filesystem resize for volume ", rsOpt.VolumeSpec.Name(), ", at ", rsOpt.DevicePath)
_, err := util.GenericResizeFS(e.plugin.host, e.plugin.GetPluginName(), rsOpt.DevicePath, rsOpt.DeviceMountPath)
fsVolume, err := util.CheckVolumeModeFilesystem(rsOpt.VolumeSpec)
if err != nil {
return false, fmt.Errorf("error checking VolumeMode: %v", err)
}
// if volume is not a fs file system, there is nothing for us to do here.
if !fsVolume {
return true, nil
}
_, err = util.GenericResizeFS(e.plugin.host, e.plugin.GetPluginName(), rsOpt.DevicePath, rsOpt.DeviceMountPath)
if err != nil {
return false, err
}


@ -282,7 +282,15 @@ func (plugin *gcePersistentDiskPlugin) ExpandVolumeDevice(
}
func (plugin *gcePersistentDiskPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
_, err := util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
if err != nil {
return false, fmt.Errorf("error checking VolumeMode: %v", err)
}
// if volume is not a fs file system, there is nothing for us to do here.
if !fsVolume {
return true, nil
}
_, err = util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
if err != nil {
return false, err
}


@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
utilstrings "k8s.io/utils/strings"
@ -202,7 +203,15 @@ func (plugin *rbdPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.
}
func (plugin *rbdPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
_, err := volutil.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
if err != nil {
return false, fmt.Errorf("error checking VolumeMode: %v", err)
}
// if volume is not a fs file system, there is nothing for us to do here.
if !fsVolume {
return true, nil
}
_, err = volutil.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
if err != nil {
return false, err
}


@ -707,12 +707,12 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
resizeOptions.DeviceMountPath = deviceMountPath
resizeOptions.CSIVolumePhase = volume.CSIVolumeStaged
// resizeFileSystem will resize the file system if user has requested a resize of
// NodeExpandVolume will resize the file system if user has requested a resize of
// underlying persistent volume and is allowed to do so.
resizeDone, resizeError = og.resizeFileSystem(volumeToMount, resizeOptions)
resizeDone, resizeError = og.nodeExpandVolume(volumeToMount, resizeOptions)
if resizeError != nil {
klog.Errorf("MountVolume.resizeFileSystem failed with %v", resizeError)
klog.Errorf("MountVolume.NodeExpandVolume failed with %v", resizeError)
return volumeToMount.GenerateError("MountVolume.MountDevice failed while expanding volume", resizeError)
}
}
@ -750,9 +750,9 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
// - Volume does not support DeviceMounter interface.
// - In case of CSI the volume does not have node stage_unstage capability.
if !resizeDone {
resizeDone, resizeError = og.resizeFileSystem(volumeToMount, resizeOptions)
resizeDone, resizeError = og.nodeExpandVolume(volumeToMount, resizeOptions)
if resizeError != nil {
klog.Errorf("MountVolume.resizeFileSystem failed with %v", resizeError)
klog.Errorf("MountVolume.NodeExpandVolume failed with %v", resizeError)
return volumeToMount.GenerateError("MountVolume.Setup failed while expanding volume", resizeError)
}
}
@ -789,72 +789,6 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
}
}
func (og *operationGenerator) resizeFileSystem(volumeToMount VolumeToMount, rsOpts volume.NodeResizeOptions) (bool, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
klog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName)
return true, nil
}
if volumeToMount.VolumeSpec != nil &&
volumeToMount.VolumeSpec.InlineVolumeSpecForCSIMigration {
klog.V(4).Infof("This volume %s is a migrated inline volume and is not resizable", volumeToMount.VolumeName)
return true, nil
}
// Get expander, if possible
expandableVolumePlugin, _ :=
og.volumePluginMgr.FindNodeExpandablePluginBySpec(volumeToMount.VolumeSpec)
if expandableVolumePlugin != nil &&
expandableVolumePlugin.RequiresFSResize() &&
volumeToMount.VolumeSpec.PersistentVolume != nil {
pv := volumeToMount.VolumeSpec.PersistentVolume
pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(pv.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil {
// Return error rather than leave the file system un-resized, caller will log and retry
return false, fmt.Errorf("MountVolume.resizeFileSystem get PVC failed : %v", err)
}
pvcStatusCap := pvc.Status.Capacity[v1.ResourceStorage]
pvSpecCap := pv.Spec.Capacity[v1.ResourceStorage]
if pvcStatusCap.Cmp(pvSpecCap) < 0 {
// File system resize was requested, proceed
klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.resizeFileSystem entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
if volumeToMount.VolumeSpec.ReadOnly {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem failed", "requested read-only file system")
klog.Warningf(detailedMsg)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
return true, nil
}
rsOpts.VolumeSpec = volumeToMount.VolumeSpec
rsOpts.NewSize = pvSpecCap
rsOpts.OldSize = pvcStatusCap
resizeDone, resizeErr := expandableVolumePlugin.NodeExpand(rsOpts)
if resizeErr != nil {
return false, fmt.Errorf("MountVolume.resizeFileSystem failed : %v", resizeErr)
}
// Volume resizing is not done but it did not error out. This could happen if a CSI volume
// does not have node stage_unstage capability but was asked to resize the volume before
// node publish. In which case - we must retry resizing after node publish.
if !resizeDone {
return false, nil
}
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.resizeFileSystem succeeded", "")
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
klog.Infof(detailedMsg)
// File system resize succeeded, now update the PVC's Capacity to match the PV's
err = util.MarkFSResizeFinished(pvc, pvSpecCap, og.kubeClient)
if err != nil {
// On retry, resizeFileSystem will be called again but do nothing
return false, fmt.Errorf("MountVolume.resizeFileSystem update PVC status failed : %v", err)
}
return true, nil
}
}
return true, nil
}
func (og *operationGenerator) GenerateUnmountVolumeFunc(
volumeToUnmount MountedVolume,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
@ -1165,6 +1099,16 @@ func (og *operationGenerator) GenerateMapVolumeFunc(
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.SuccessfulMountVolume, simpleMsg)
klog.V(verbosity).Infof(detailedMsg)
resizeOptions := volume.NodeResizeOptions{
DevicePath: devicePath,
CSIVolumePhase: volume.CSIVolumePublished,
}
_, resizeError := og.nodeExpandVolume(volumeToMount, resizeOptions)
if resizeError != nil {
klog.Errorf("MapVolume.NodeExpandVolume failed with %v", resizeError)
return volumeToMount.GenerateError("MapVolume.MarkVolumeAsMounted failed while expanding volume", resizeError)
}
// Update actual state of world
markVolMountedErr := actualStateOfWorld.MarkVolumeAsMounted(
volumeToMount.PodName,
@ -1623,14 +1567,14 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc(
if useCSIPlugin(og.volumePluginMgr, volumeToMount.VolumeSpec) {
csiSpec, err := translateSpec(volumeToMount.VolumeSpec)
if err != nil {
return volumeToMount.GenerateError("VolumeFSResize.translateSpec failed", err)
return volumeToMount.GenerateError("NodeExpandVolume.translateSpec failed", err)
}
volumeToMount.VolumeSpec = csiSpec
}
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
if err != nil || volumePlugin == nil {
return volumeToMount.GenerateError("VolumeFSResize.FindPluginBySpec failed", err)
return volumeToMount.GenerateError("NodeExpandVolume.FindPluginBySpec failed", err)
}
var resizeDone bool
@ -1649,7 +1593,7 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc(
resizeOptions.DevicePath = volumeToMount.DevicePath
dmp, err := volumeAttacher.GetDeviceMountPath(volumeToMount.VolumeSpec)
if err != nil {
return volumeToMount.GenerateError("VolumeFSResize.GetDeviceMountPath failed", err)
return volumeToMount.GenerateError("NodeExpandVolume.GetDeviceMountPath failed", err)
}
resizeOptions.DeviceMountPath = dmp
resizeDone, simpleErr, detailedErr = og.doOnlineExpansion(volumeToMount, actualStateOfWorld, resizeOptions)
@ -1667,7 +1611,7 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc(
volumeToMount.Pod,
volume.VolumeOptions{})
if newMounterErr != nil {
return volumeToMount.GenerateError("VolumeFSResize.NewMounter initialization failed", newMounterErr)
return volumeToMount.GenerateError("NodeExpandVolume.NewMounter initialization failed", newMounterErr)
}
resizeOptions.DeviceMountPath = volumeMounter.GetPath()
@ -1681,7 +1625,7 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc(
}
// This is a placeholder error - we should NEVER reach here.
err = fmt.Errorf("volume resizing failed for unknown reason")
return volumeToMount.GenerateError("VolumeFSResize.resizeFileSystem failed to resize volume", err)
return volumeToMount.GenerateError("NodeExpandVolume.NodeExpandVolume failed to resize volume", err)
}
eventRecorderFunc := func(err *error) {
@ -1705,7 +1649,7 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc(
volumePlugin, err :=
og.volumePluginMgr.FindPluginBySpec(volumeToMount.VolumeSpec)
if err != nil || volumePlugin == nil {
return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("VolumeFSResize.FindPluginBySpec failed", err)
return volumetypes.GeneratedOperations{}, volumeToMount.GenerateErrorDetailed("NodeExpandVolume.FindPluginBySpec failed", err)
}
return volumetypes.GeneratedOperations{
@ -1719,17 +1663,18 @@ func (og *operationGenerator) GenerateExpandInUseVolumeFunc(
func (og *operationGenerator) doOnlineExpansion(volumeToMount VolumeToMount,
actualStateOfWorld ActualStateOfWorldMounterUpdater,
resizeOptions volume.NodeResizeOptions) (bool, error, error) {
resizeDone, err := og.resizeFileSystem(volumeToMount, resizeOptions)
resizeDone, err := og.nodeExpandVolume(volumeToMount, resizeOptions)
if err != nil {
klog.Errorf("VolumeFSResize.resizeFileSystem failed : %v", err)
e1, e2 := volumeToMount.GenerateError("VolumeFSResize.resizeFileSystem failed", err)
klog.Errorf("NodeExpandVolume.NodeExpandVolume failed : %v", err)
e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.NodeExpandVolume failed", err)
return false, e1, e2
}
if resizeDone {
markFSResizedErr := actualStateOfWorld.MarkVolumeAsResized(volumeToMount.PodName, volumeToMount.VolumeName)
if markFSResizedErr != nil {
// On failure, return error. Caller will log and retry.
e1, e2 := volumeToMount.GenerateError("VolumeFSResize.MarkVolumeAsResized failed", markFSResizedErr)
e1, e2 := volumeToMount.GenerateError("NodeExpandVolume.MarkVolumeAsResized failed", markFSResizedErr)
return false, e1, e2
}
return true, nil, nil
@ -1737,6 +1682,72 @@ func (og *operationGenerator) doOnlineExpansion(volumeToMount VolumeToMount,
return false, nil, nil
}
func (og *operationGenerator) nodeExpandVolume(volumeToMount VolumeToMount, rsOpts volume.NodeResizeOptions) (bool, error) {
if !utilfeature.DefaultFeatureGate.Enabled(features.ExpandPersistentVolumes) {
klog.V(4).Infof("Resizing is not enabled for this volume %s", volumeToMount.VolumeName)
return true, nil
}
if volumeToMount.VolumeSpec != nil &&
volumeToMount.VolumeSpec.InlineVolumeSpecForCSIMigration {
klog.V(4).Infof("This volume %s is a migrated inline volume and is not resizable", volumeToMount.VolumeName)
return true, nil
}
// Get expander, if possible
expandableVolumePlugin, _ :=
og.volumePluginMgr.FindNodeExpandablePluginBySpec(volumeToMount.VolumeSpec)
if expandableVolumePlugin != nil &&
expandableVolumePlugin.RequiresFSResize() &&
volumeToMount.VolumeSpec.PersistentVolume != nil {
pv := volumeToMount.VolumeSpec.PersistentVolume
pvc, err := og.kubeClient.CoreV1().PersistentVolumeClaims(pv.Spec.ClaimRef.Namespace).Get(pv.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil {
// Return error rather than leave the file system un-resized, caller will log and retry
return false, fmt.Errorf("MountVolume.NodeExpandVolume get PVC failed : %v", err)
}
pvcStatusCap := pvc.Status.Capacity[v1.ResourceStorage]
pvSpecCap := pv.Spec.Capacity[v1.ResourceStorage]
if pvcStatusCap.Cmp(pvSpecCap) < 0 {
// File system resize was requested, proceed
klog.V(4).Infof(volumeToMount.GenerateMsgDetailed("MountVolume.NodeExpandVolume entering", fmt.Sprintf("DevicePath %q", volumeToMount.DevicePath)))
if volumeToMount.VolumeSpec.ReadOnly {
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume failed", "requested read-only file system")
klog.Warningf(detailedMsg)
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeWarning, kevents.FileSystemResizeFailed, simpleMsg)
return true, nil
}
rsOpts.VolumeSpec = volumeToMount.VolumeSpec
rsOpts.NewSize = pvSpecCap
rsOpts.OldSize = pvcStatusCap
resizeDone, resizeErr := expandableVolumePlugin.NodeExpand(rsOpts)
if resizeErr != nil {
return false, fmt.Errorf("MountVolume.NodeExpandVolume failed : %v", resizeErr)
}
// Volume resizing is not done but it did not error out. This could happen if a CSI volume
// does not have node stage_unstage capability but was asked to resize the volume before
// node publish. In which case - we must retry resizing after node publish.
if !resizeDone {
return false, nil
}
simpleMsg, detailedMsg := volumeToMount.GenerateMsg("MountVolume.NodeExpandVolume succeeded", "")
og.recorder.Eventf(volumeToMount.Pod, v1.EventTypeNormal, kevents.FileSystemResizeSuccess, simpleMsg)
klog.Infof(detailedMsg)
// File system resize succeeded, now update the PVC's Capacity to match the PV's
err = util.MarkFSResizeFinished(pvc, pvSpecCap, og.kubeClient)
if err != nil {
// On retry, NodeExpandVolume will be called again but do nothing
return false, fmt.Errorf("MountVolume.NodeExpandVolume update PVC status failed : %v", err)
}
return true, nil
}
}
return true, nil
}
func checkMountOptionSupport(og *operationGenerator, volumeToMount VolumeToMount, plugin volume.VolumePlugin) error {
mountOptions := util.MountOptionFromSpec(volumeToMount.VolumeSpec)
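The renamed nodeExpandVolume keeps the same trigger as the old resizeFileSystem: node-side expansion runs only while the PVC's recorded capacity still lags the PV's spec capacity. A small runnable sketch of that comparison using the real apimachinery resource package; the sizes are illustrative, not values from this commit:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// needsNodeExpand reports whether node-side expansion is still pending: the control
// plane has grown the PV, but the PVC status has not yet caught up.
func needsNodeExpand(pvcStatusCap, pvSpecCap resource.Quantity) bool {
	return pvcStatusCap.Cmp(pvSpecCap) < 0
}

func main() {
	current := resource.MustParse("1Gi")
	requested := resource.MustParse("2Gi")
	fmt.Println(needsNodeExpand(current, requested)) // true: expansion still has to run
}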