Fix devicePath for raw block expansion
Fix tests
parent 7a4cdf5ab2
commit 9dbe0b3ad8
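This commit fixes volume expansion for raw block volumes: a raw block volume has no filesystem mount point, so the CSI expander must hand NodeExpandVolume the device path rather than the device mount path. The populator-side read-only check is simplified at the same time, and the reconciler tests are reworked so the fake client knows about the PV/PVC pair and the attached device path. A minimal standalone sketch of the path-selection rule the diff below introduces (NodeResizeOptions here is a pared-down stand-in for the real volume.NodeResizeOptions, not the actual type):

    package main

    import "fmt"

    // NodeResizeOptions is a simplified stand-in keeping only the two fields
    // this commit cares about.
    type NodeResizeOptions struct {
        DeviceMountPath string // where a filesystem volume is staged/mounted
        DevicePath      string // the raw block device itself
    }

    // targetPath mirrors the rule added to the CSI expander: filesystem volumes
    // are expanded at their mount path, raw block volumes at their device path.
    func targetPath(opts NodeResizeOptions, fsVolume bool) string {
        if fsVolume {
            return opts.DeviceMountPath
        }
        return opts.DevicePath
    }

    func main() {
        opts := NodeResizeOptions{DeviceMountPath: "/foo/bar", DevicePath: "/mnt/foobar"}
        fmt.Println(targetPath(opts, true))  // filesystem volume: /foo/bar
        fmt.Println(targetPath(opts, false)) // raw block volume: /mnt/foobar
    }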
@@ -11,7 +11,6 @@ go_library(
    srcs = ["desired_state_of_world_populator.go"],
    importpath = "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator",
    deps = [
        "//pkg/api/v1/pod:go_default_library",
        "//pkg/features:go_default_library",
        "//pkg/kubelet/config:go_default_library",
        "//pkg/kubelet/container:go_default_library",
@@ -388,7 +388,10 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
         // so we only need to check it once if more than one pod use it.
         return
     }
-    if mountedReadOnlyByPod(podVolume, pod) {
+    // volumeSpec.ReadOnly is the value that determines if volume could be formatted when being mounted.
+    // This is the same flag that determines filesystem resizing behaviour for offline resizing and hence
+    // we should use it here. This value comes from Pod.spec.volumes.persistentVolumeClaim.readOnly.
+    if volumeSpec.ReadOnly {
         // This volume is used as read only by this pod, we don't perform resize for read only volumes.
         klog.V(5).Infof("Skip file system resize check for volume %s in pod %s/%s "+
             "as the volume is mounted as readonly", podVolume.Name, pod.Namespace, pod.Name)
@@ -400,13 +403,6 @@ func (dswp *desiredStateOfWorldPopulator) checkVolumeFSResize(
     processedVolumesForFSResize.Insert(string(uniqueVolumeName))
 }
 
-func mountedReadOnlyByPod(podVolume v1.Volume, pod *v1.Pod) bool {
-    if podVolume.PersistentVolumeClaim.ReadOnly {
-        return true
-    }
-    return false
-}
-
 func getUniqueVolumeName(
     podName volumetypes.UniquePodName,
     outerVolumeSpecName string,
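The two hunks above replace the mountedReadOnlyByPod helper with a direct check of volumeSpec.ReadOnly: the helper only looked at how one pod mounts the claim, while the spec-level flag is the same one that decides whether a volume may be formatted at mount time and whether offline resizing runs, so it is the right gate for skipping resize. A toy sketch of the two predicates (the types are simplified stand-ins, not the real API structs):

    // PVCSource, Volume and Spec are simplified stand-ins for the v1/volume types.
    type PVCSource struct{ ReadOnly bool }
    type Volume struct{ PersistentVolumeClaim *PVCSource }
    type Spec struct{ ReadOnly bool }

    // Old gate: the per-pod view of the claim; two pods can disagree.
    func mountedReadOnlyByPod(podVolume Volume) bool {
        return podVolume.PersistentVolumeClaim.ReadOnly
    }

    // New gate: the spec-level flag, consistent with the format and
    // offline-resize decisions made for the same volume.
    func skipResizeCheck(spec Spec) bool {
        return spec.ReadOnly
    }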
@@ -442,11 +442,47 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
     // Enable BlockVolume feature gate
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "pod1",
+            UID:       "pod1uid",
+            Namespace: "ns",
+        },
+        Spec: v1.PodSpec{},
+    }
+
+    mode := v1.PersistentVolumeBlock
+    gcepv := &v1.PersistentVolume{
+        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
+        Spec: v1.PersistentVolumeSpec{
+            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
+            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
+            AccessModes: []v1.PersistentVolumeAccessMode{
+                v1.ReadWriteOnce,
+                v1.ReadOnlyMany,
+            },
+            VolumeMode: &mode,
+            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
+        },
+    }
+
+    gcepvc := &v1.PersistentVolumeClaim{
+        ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
+        Spec: v1.PersistentVolumeClaimSpec{
+            VolumeName: "volume-name",
+            VolumeMode: &mode,
+        },
+        Status: v1.PersistentVolumeClaimStatus{
+            Phase:    v1.ClaimBound,
+            Capacity: gcepv.Spec.Capacity,
+        },
+    }
+
     // Arrange
     volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
     dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
     asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
-    kubeClient := createTestClient()
+    kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
     fakeRecorder := &record.FakeRecorder{}
     fakeHandler := volumetesting.NewBlockVolumePathHandler()
     oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@@ -469,27 +505,6 @@ func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
         &mount.FakeHostUtil{},
         volumePluginMgr,
         kubeletPodsDir)
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pod1",
-            UID:  "pod1uid",
-        },
-        Spec: v1.PodSpec{},
-    }
-
-    mode := v1.PersistentVolumeBlock
-    gcepv := &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
-        Spec: v1.PersistentVolumeSpec{
-            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
-            AccessModes: []v1.PersistentVolumeAccessMode{
-                v1.ReadWriteOnce,
-                v1.ReadOnlyMany,
-            },
-            VolumeMode: &mode,
-        },
-    }
 
     volumeSpec := &volume.Spec{
         PersistentVolume: gcepv,
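The rewrite of this first test is the template for the three block-volume tests that follow: the pod gains a namespace, the PV gains a ClaimRef, and a bound PVC is introduced so the new PV/PVC-aware fake client can resolve pod, claim, and volume, which the resize path needs. The fixture linkage in miniature (values copied from the test above):

    // PV and PVC point at each other; the fake client's reactors walk this pair.
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{Name: "volume-name"},
        Spec: v1.PersistentVolumeSpec{
            ClaimRef: &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
        },
    }
    pvc := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{Name: "pvc-volume-name", Namespace: "ns"},
        Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "volume-name"},
        Status:     v1.PersistentVolumeClaimStatus{Phase: v1.ClaimBound},
    }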
@@ -527,11 +542,53 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
     // Enable BlockVolume feature gate
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "pod1",
+            UID:       "pod1uid",
+            Namespace: "ns",
+        },
+        Spec: v1.PodSpec{},
+    }
+
+    mode := v1.PersistentVolumeBlock
+    gcepv := &v1.PersistentVolume{
+        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
+        Spec: v1.PersistentVolumeSpec{
+            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
+            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
+            AccessModes: []v1.PersistentVolumeAccessMode{
+                v1.ReadWriteOnce,
+                v1.ReadOnlyMany,
+            },
+            VolumeMode: &mode,
+            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
+        },
+    }
+    gcepvc := &v1.PersistentVolumeClaim{
+        ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
+        Spec: v1.PersistentVolumeClaimSpec{
+            VolumeName: "volume-name",
+            VolumeMode: &mode,
+        },
+        Status: v1.PersistentVolumeClaimStatus{
+            Phase:    v1.ClaimBound,
+            Capacity: gcepv.Spec.Capacity,
+        },
+    }
+
+    volumeSpec := &volume.Spec{
+        PersistentVolume: gcepv,
+    }
+
     // Arrange
     volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
     dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
     asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
-    kubeClient := createTestClient()
+    kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
+        Name:       "fake-plugin/fake-device1",
+        DevicePath: "/fake/path",
+    })
     fakeRecorder := &record.FakeRecorder{}
     fakeHandler := volumetesting.NewBlockVolumePathHandler()
     oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@@ -554,31 +611,7 @@ func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
         &mount.FakeHostUtil{},
         volumePluginMgr,
         kubeletPodsDir)
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pod1",
-            UID:  "pod1uid",
-        },
-        Spec: v1.PodSpec{},
-    }
-
-    mode := v1.PersistentVolumeBlock
-    gcepv := &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
-        Spec: v1.PersistentVolumeSpec{
-            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
-            AccessModes: []v1.PersistentVolumeAccessMode{
-                v1.ReadWriteOnce,
-                v1.ReadOnlyMany,
-            },
-            VolumeMode: &mode,
-        },
-    }
 
-    volumeSpec := &volume.Spec{
-        PersistentVolume: gcepv,
-    }
     podName := util.GetUniquePodName(pod)
     generatedVolumeName, err := dsw.AddPodToVolume(
         podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@@ -613,11 +646,50 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
     // Enable BlockVolume feature gate
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "pod1",
+            UID:       "pod1uid",
+            Namespace: "ns",
+        },
+        Spec: v1.PodSpec{},
+    }
+
+    mode := v1.PersistentVolumeBlock
+    gcepv := &v1.PersistentVolume{
+        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
+        Spec: v1.PersistentVolumeSpec{
+            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
+            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
+            AccessModes: []v1.PersistentVolumeAccessMode{
+                v1.ReadWriteOnce,
+                v1.ReadOnlyMany,
+            },
+            VolumeMode: &mode,
+            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
+        },
+    }
+    gcepvc := &v1.PersistentVolumeClaim{
+        ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
+        Spec: v1.PersistentVolumeClaimSpec{
+            VolumeName: "volume-name",
+            VolumeMode: &mode,
+        },
+        Status: v1.PersistentVolumeClaimStatus{
+            Phase:    v1.ClaimBound,
+            Capacity: gcepv.Spec.Capacity,
+        },
+    }
+
+    volumeSpec := &volume.Spec{
+        PersistentVolume: gcepv,
+    }
+
     // Arrange
     volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
     dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
     asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
-    kubeClient := createTestClient()
+    kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
     fakeRecorder := &record.FakeRecorder{}
     fakeHandler := volumetesting.NewBlockVolumePathHandler()
     oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@@ -640,31 +712,7 @@ func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
         &mount.FakeHostUtil{},
         volumePluginMgr,
         kubeletPodsDir)
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pod1",
-            UID:  "pod1uid",
-        },
-        Spec: v1.PodSpec{},
-    }
-
-    mode := v1.PersistentVolumeBlock
-    gcepv := &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
-        Spec: v1.PersistentVolumeSpec{
-            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
-            AccessModes: []v1.PersistentVolumeAccessMode{
-                v1.ReadWriteOnce,
-                v1.ReadOnlyMany,
-            },
-            VolumeMode: &mode,
-        },
-    }
 
-    volumeSpec := &volume.Spec{
-        PersistentVolume: gcepv,
-    }
     podName := util.GetUniquePodName(pod)
     generatedVolumeName, err := dsw.AddPodToVolume(
         podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@@ -709,11 +757,53 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
     // Enable BlockVolume feature gate
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.BlockVolume, true)()
 
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "pod1",
+            UID:       "pod1uid",
+            Namespace: "ns",
+        },
+        Spec: v1.PodSpec{},
+    }
+
+    mode := v1.PersistentVolumeBlock
+    gcepv := &v1.PersistentVolume{
+        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
+        Spec: v1.PersistentVolumeSpec{
+            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
+            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
+            AccessModes: []v1.PersistentVolumeAccessMode{
+                v1.ReadWriteOnce,
+                v1.ReadOnlyMany,
+            },
+            VolumeMode: &mode,
+            ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
+        },
+    }
+    gcepvc := &v1.PersistentVolumeClaim{
+        ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
+        Spec: v1.PersistentVolumeClaimSpec{
+            VolumeName: "volume-name",
+            VolumeMode: &mode,
+        },
+        Status: v1.PersistentVolumeClaimStatus{
+            Phase:    v1.ClaimBound,
+            Capacity: gcepv.Spec.Capacity,
+        },
+    }
+
+    volumeSpec := &volume.Spec{
+        PersistentVolume: gcepv,
+    }
+
     // Arrange
     volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
     dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
     asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
-    kubeClient := createTestClient()
+    kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
+        Name:       "fake-plugin/fake-device1",
+        DevicePath: "/fake/path",
+    })
     fakeRecorder := &record.FakeRecorder{}
     fakeHandler := volumetesting.NewBlockVolumePathHandler()
     oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
@@ -736,31 +826,7 @@ func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
         &mount.FakeHostUtil{},
         volumePluginMgr,
         kubeletPodsDir)
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pod1",
-            UID:  "pod1uid",
-        },
-        Spec: v1.PodSpec{},
-    }
-
-    mode := v1.PersistentVolumeBlock
-    gcepv := &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
-        Spec: v1.PersistentVolumeSpec{
-            Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
-            PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
-            AccessModes: []v1.PersistentVolumeAccessMode{
-                v1.ReadWriteOnce,
-                v1.ReadOnlyMany,
-            },
-            VolumeMode: &mode,
-        },
-    }
 
-    volumeSpec := &volume.Spec{
-        PersistentVolume: gcepv,
-    }
     podName := util.GetUniquePodName(pod)
     generatedVolumeName, err := dsw.AddPodToVolume(
         podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
@@ -948,113 +1014,132 @@ func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
 // Verifies volume's fsResizeRequired flag is cleared later.
 func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ExpandInUsePersistentVolumes, true)()
+    blockMode := v1.PersistentVolumeBlock
+    fsMode := v1.PersistentVolumeFilesystem
 
-    fs := v1.PersistentVolumeFilesystem
-    pv := &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pv",
-            UID:  "pvuid",
-        },
-        Spec: v1.PersistentVolumeSpec{
-            ClaimRef:   &v1.ObjectReference{Name: "pvc"},
-            VolumeMode: &fs,
-        },
-    }
-    pvc := &v1.PersistentVolumeClaim{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pvc",
-            UID:  "pvcuid",
-        },
-        Spec: v1.PersistentVolumeClaimSpec{
-            VolumeName: "pv",
-            VolumeMode: &fs,
-        },
-    }
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "pod1",
-            UID:  "pod1uid",
-        },
-        Spec: v1.PodSpec{
-            Volumes: []v1.Volume{
-                {
-                    Name: "volume-name",
-                    VolumeSource: v1.VolumeSource{
-                        PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-                            ClaimName: pvc.Name,
-                        },
-                    },
-                },
-            },
-        },
-    }
+    var tests = []struct {
+        name       string
+        volumeMode *v1.PersistentVolumeMode
+    }{
+        {
+            name:       "expand-fs-volume",
+            volumeMode: &fsMode,
+        },
+        {
+            name:       "expand-raw-block",
+            volumeMode: &blockMode,
+        },
+    }
 
-    volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
-    dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
-    asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
-    kubeClient := createtestClientWithPVPVC(pv, pvc)
-    fakeRecorder := &record.FakeRecorder{}
-    fakeHandler := volumetesting.NewBlockVolumePathHandler()
-    oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
-        kubeClient,
-        volumePluginMgr,
-        fakeRecorder,
-        false, /* checkNodeCapabilitiesBeforeMount */
-        fakeHandler))
+    for _, tc := range tests {
+        t.Run(tc.name, func(t *testing.T) {
+            pv := &v1.PersistentVolume{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "pv",
+                    UID:  "pvuid",
+                },
+                Spec: v1.PersistentVolumeSpec{
+                    ClaimRef:   &v1.ObjectReference{Name: "pvc"},
+                    VolumeMode: tc.volumeMode,
+                },
+            }
+            pvc := &v1.PersistentVolumeClaim{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "pvc",
+                    UID:  "pvcuid",
+                },
+                Spec: v1.PersistentVolumeClaimSpec{
+                    VolumeName: "pv",
+                    VolumeMode: tc.volumeMode,
+                },
+            }
+            pod := &v1.Pod{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name: "pod1",
+                    UID:  "pod1uid",
+                },
+                Spec: v1.PodSpec{
+                    Volumes: []v1.Volume{
+                        {
+                            Name: "volume-name",
+                            VolumeSource: v1.VolumeSource{
+                                PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+                                    ClaimName: pvc.Name,
+                                },
+                            },
+                        },
+                    },
+                },
+            }
 
-    reconciler := NewReconciler(
-        kubeClient,
-        true, /* controllerAttachDetachEnabled */
-        reconcilerLoopSleepDuration,
-        waitForAttachTimeout,
-        nodeName,
-        dsw,
-        asw,
-        hasAddedPods,
-        oex,
-        &mount.FakeMounter{},
-        &mount.FakeHostUtil{},
-        volumePluginMgr,
-        kubeletPodsDir)
+            volumePluginMgr, fakePlugin := volumetesting.GetTestVolumePluginMgr(t)
+            dsw := cache.NewDesiredStateOfWorld(volumePluginMgr)
+            asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
+            kubeClient := createtestClientWithPVPVC(pv, pvc)
+            fakeRecorder := &record.FakeRecorder{}
+            fakeHandler := volumetesting.NewBlockVolumePathHandler()
+            oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
+                kubeClient,
+                volumePluginMgr,
+                fakeRecorder,
+                false, /* checkNodeCapabilitiesBeforeMount */
+                fakeHandler))
 
-    volumeSpec := &volume.Spec{PersistentVolume: pv}
-    podName := util.GetUniquePodName(pod)
-    volumeName, err := dsw.AddPodToVolume(
-        podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
-    // Assert
-    if err != nil {
-        t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
-    }
-    dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
+            reconciler := NewReconciler(
+                kubeClient,
+                true, /* controllerAttachDetachEnabled */
+                reconcilerLoopSleepDuration,
+                waitForAttachTimeout,
+                nodeName,
+                dsw,
+                asw,
+                hasAddedPods,
+                oex,
+                &mount.FakeMounter{},
+                &mount.FakeHostUtil{},
+                volumePluginMgr,
+                kubeletPodsDir)
 
-    // Start the reconciler to fill ASW.
-    stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
-    go func() {
-        reconciler.Run(stopChan)
-        close(stoppedChan)
-    }()
-    waitForMount(t, fakePlugin, volumeName, asw)
-    // Stop the reconciler.
-    close(stopChan)
-    <-stoppedChan
+            volumeSpec := &volume.Spec{PersistentVolume: pv}
+            podName := util.GetUniquePodName(pod)
+            volumeName, err := dsw.AddPodToVolume(
+                podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */)
+            // Assert
+            if err != nil {
+                t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
+            }
+            dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
 
-    // Mark volume as fsResizeRequired.
-    asw.MarkFSResizeRequired(volumeName, podName)
-    _, _, podExistErr := asw.PodExistsInVolume(podName, volumeName)
-    if !cache.IsFSResizeRequiredError(podExistErr) {
-        t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr)
-    }
+            // Start the reconciler to fill ASW.
+            stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
+            go func() {
+                reconciler.Run(stopChan)
+                close(stoppedChan)
+            }()
+            waitForMount(t, fakePlugin, volumeName, asw)
+            // Stop the reconciler.
+            close(stopChan)
+            <-stoppedChan
 
-    // Start the reconciler again, we hope reconciler will perform the
-    // resize operation and clear the fsResizeRequired flag for volume.
-    go reconciler.Run(wait.NeverStop)
+            // Mark volume as fsResizeRequired.
+            asw.MarkFSResizeRequired(volumeName, podName)
+            _, _, podExistErr := asw.PodExistsInVolume(podName, volumeName)
+            if !cache.IsFSResizeRequiredError(podExistErr) {
+                t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr)
+            }
 
-    waitErr := retryWithExponentialBackOff(500*time.Millisecond, func() (done bool, err error) {
-        mounted, _, err := asw.PodExistsInVolume(podName, volumeName)
-        return mounted && err == nil, nil
-    })
-    if waitErr != nil {
-        t.Fatal("Volume resize should succeeded")
-    }
+            // Start the reconciler again, we hope reconciler will perform the
+            // resize operation and clear the fsResizeRequired flag for volume.
+            go reconciler.Run(wait.NeverStop)
+
+            waitErr := retryWithExponentialBackOff(500*time.Millisecond, func() (done bool, err error) {
+                mounted, _, err := asw.PodExistsInVolume(podName, volumeName)
+                return mounted && err == nil, nil
+            })
+            if waitErr != nil {
+                t.Fatal("Volume resize should succeeded")
+            }
+        })
+    }
 }
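The hunk above is a mechanical conversion of Test_Run_Positive_VolumeFSResizeControllerAttachEnabled to a table-driven test: the hard-coded filesystem mode becomes a two-row table and the whole body moves into t.Run, so the resize flow is exercised once per volume mode. The shape of the conversion, stripped of the test body:

    var tests = []struct {
        name       string
        volumeMode *v1.PersistentVolumeMode
    }{
        {name: "expand-fs-volume", volumeMode: &fsMode},
        {name: "expand-raw-block", volumeMode: &blockMode},
    }
    for _, tc := range tests {
        t.Run(tc.name, func(t *testing.T) {
            // Build the PV/PVC with VolumeMode: tc.volumeMode, run the
            // reconciler, and assert the fsResizeRequired flag is cleared,
            // exactly as in the diff above.
        })
    }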
@@ -1138,19 +1223,21 @@ func runReconciler(reconciler Reconciler) {
     go reconciler.Run(wait.NeverStop)
 }
 
-func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) *fake.Clientset {
+func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, attachedVolumes ...v1.AttachedVolume) *fake.Clientset {
     fakeClient := &fake.Clientset{}
+    if len(attachedVolumes) == 0 {
+        attachedVolumes = append(attachedVolumes, v1.AttachedVolume{
+            Name:       "fake-plugin/pv",
+            DevicePath: "fake/path",
+        })
+    }
     fakeClient.AddReactor("get", "nodes",
         func(action core.Action) (bool, runtime.Object, error) {
             return true, &v1.Node{
                 ObjectMeta: metav1.ObjectMeta{Name: string(nodeName)},
                 Status: v1.NodeStatus{
-                    VolumesAttached: []v1.AttachedVolume{
-                        {
-                            Name:       "fake-plugin/pv",
-                            DevicePath: "fake/path",
-                        },
-                    }},
+                    VolumesAttached: attachedVolumes,
+                },
             }, nil
         })
     fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
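createtestClientWithPVPVC keeps its existing two-argument call sites working by taking the attached-volume list as a variadic parameter and defaulting it when empty; the block-volume tests above pass an explicit v1.AttachedVolume so the node status carries the device path ("/fake/path") that raw block expansion will consume. The variadic-default pattern in isolation (AttachedVolume here is a local stand-in, not the real API type):

    type AttachedVolume struct {
        Name       string
        DevicePath string
    }

    // defaultAttached returns the caller's list, or the historical default when
    // the caller passes nothing, so old call sites behave exactly as before.
    func defaultAttached(attached ...AttachedVolume) []AttachedVolume {
        if len(attached) == 0 {
            attached = append(attached, AttachedVolume{
                Name:       "fake-plugin/pv",
                DevicePath: "fake/path",
            })
        }
        return attached
    }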
@@ -26,6 +26,7 @@ import (
     "k8s.io/klog"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/volume"
+    "k8s.io/kubernetes/pkg/volume/util"
 )
 
 var _ volume.NodeExpandableVolumePlugin = &csiPlugin{}
@@ -52,14 +53,19 @@ func (c *csiPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, er
     if err != nil {
         return false, err
     }
+    fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
+    if err != nil {
+        return false, errors.New(log("Expander.NodeExpand failed to check VolumeMode of source: %v", err))
+    }
 
-    return c.nodeExpandWithClient(resizeOptions, csiSource, csClient)
+    return c.nodeExpandWithClient(resizeOptions, csiSource, csClient, fsVolume)
 }
 
 func (c *csiPlugin) nodeExpandWithClient(
     resizeOptions volume.NodeResizeOptions,
     csiSource *api.CSIPersistentVolumeSource,
-    csClient csiClient) (bool, error) {
+    csClient csiClient,
+    fsVolume bool) (bool, error) {
     driverName := csiSource.Driver
 
     ctx, cancel := context.WithTimeout(context.Background(), csiTimeout)
@@ -88,7 +94,12 @@ func (c *csiPlugin) nodeExpandWithClient(
         return false, nil
     }
 
-    _, err = csClient.NodeExpandVolume(ctx, csiSource.VolumeHandle, resizeOptions.DeviceMountPath, resizeOptions.NewSize)
+    volumeTargetPath := resizeOptions.DeviceMountPath
+    if !fsVolume {
+        volumeTargetPath = resizeOptions.DevicePath
+    }
+
+    _, err = csClient.NodeExpandVolume(ctx, csiSource.VolumeHandle, volumeTargetPath, resizeOptions.NewSize)
     if err != nil {
         return false, fmt.Errorf("Expander.NodeExpand failed to expand the volume : %v", err)
    }
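NodeExpand now determines the volume mode once, via util.CheckVolumeModeFilesystem, and threads the boolean into nodeExpandWithClient, which picks the NodeExpandVolume target path accordingly. A rough stand-in for the mode check (the real helper inspects the volume spec; this version only makes the nil-default explicit):

    // checkVolumeModeFilesystem reports whether a PV should be treated as a
    // filesystem volume. A nil VolumeMode defaults to Filesystem, matching the
    // Kubernetes API default; stand-in for util.CheckVolumeModeFilesystem.
    func checkVolumeModeFilesystem(mode *v1.PersistentVolumeMode) bool {
        return mode == nil || *mode == v1.PersistentVolumeFilesystem
    }

The test changes below then give every pre-existing case fsVolume: true (so its behaviour is unchanged) and add one fsVolume=false case that drives the new DevicePath branch; resizeOptions now carries both DeviceMountPath ("/foo/bar") and DevicePath ("/mnt/foobar") so either branch has a value to hand to the fake CSI client.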
@@ -31,6 +31,7 @@ func TestNodeExpand(t *testing.T) {
         nodeStageSet  bool
         volumePhase   volume.CSIVolumePhaseType
         success       bool
+        fsVolume      bool
     }{
         {
             name: "when node expansion is not set",
@@ -42,12 +43,14 @@ func TestNodeExpand(t *testing.T) {
             nodeStageSet:  true,
             volumePhase:   volume.CSIVolumeStaged,
             success:       true,
+            fsVolume:      true,
         },
         {
             name:          "when nodeExpansion=on, nodeStage=off, volumePhase=staged",
             nodeExpansion: true,
             volumePhase:   volume.CSIVolumeStaged,
             success:       false,
+            fsVolume:      true,
         },
         {
             name: "when nodeExpansion=on, nodeStage=on, volumePhase=published",
@@ -55,12 +58,21 @@ func TestNodeExpand(t *testing.T) {
             nodeStageSet:  true,
             volumePhase:   volume.CSIVolumePublished,
             success:       true,
+            fsVolume:      true,
         },
         {
             name:          "when nodeExpansion=on, nodeStage=off, volumePhase=published",
             nodeExpansion: true,
             volumePhase:   volume.CSIVolumePublished,
             success:       true,
+            fsVolume:      true,
         },
+        {
+            name:          "when nodeExpansion=on, nodeStage=off, volumePhase=published, fsVolume=false",
+            nodeExpansion: true,
+            volumePhase:   volume.CSIVolumePublished,
+            success:       true,
+            fsVolume:      false,
+        },
     }
     for _, tc := range tests {
@@ -76,13 +88,14 @@ func TestNodeExpand(t *testing.T) {
                 VolumeSpec:      spec,
                 NewSize:         newSize,
                 DeviceMountPath: "/foo/bar",
+                DevicePath:      "/mnt/foobar",
                 CSIVolumePhase:  tc.volumePhase,
             }
             csiSource, _ := getCSISourceFromSpec(resizeOptions.VolumeSpec)
 
             csClient := setupClientWithExpansion(t, tc.nodeStageSet, tc.nodeExpansion)
 
-            ok, err := plug.nodeExpandWithClient(resizeOptions, csiSource, csClient)
+            ok, err := plug.nodeExpandWithClient(resizeOptions, csiSource, csClient, tc.fsVolume)
             if ok != tc.success {
                 if err != nil {
                     t.Errorf("For %s : expected %v got %v with %v", tc.name, tc.success, ok, err)
@@ -12,7 +12,6 @@ go_library(
        "fakegenerator.go",
        "operation_executor.go",
        "operation_generator.go",
        "volume_operation.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/util/operationexecutor",
    deps = [