allow FSGroupPolicy and PodInfoOnMount to be mutable in CSIDriver.Spec

Hao Ruan 2023-03-02 16:27:50 +08:00
parent fc479f41b3
commit 531324cb1d
9 changed files with 355 additions and 102 deletions
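(Illustrative sketch, not part of this commit.) With FSGroupPolicy and PodInfoOnMount mutable, an existing CSIDriver object can be updated in place instead of being deleted and recreated. A minimal client-go example, assuming a kubeconfig at the default path and a placeholder driver name "hostpath.csi.k8s.io":

// Illustrative only; driver name and kubeconfig location are assumptions.
package main

import (
	"context"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	driver, err := clientset.StorageV1().CSIDrivers().Get(context.TODO(), "hostpath.csi.k8s.io", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// Flip the two fields that this commit makes mutable.
	fileFSGroupPolicy := storagev1.FileFSGroupPolicy
	podInfoOnMount := true
	driver.Spec.FSGroupPolicy = &fileFSGroupPolicy
	driver.Spec.PodInfoOnMount = &podInfoOnMount

	updated, err := clientset.StorageV1().CSIDrivers().Update(context.TODO(), driver, metav1.UpdateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("updated CSIDriver %s, generation %d\n", updated.Name, updated.Generation)
}

Before this change, the same Update call would have been rejected by ValidateCSIDriverUpdate, since both fields were validated as immutable.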


@ -284,7 +284,7 @@ type CSIDriverSpec struct {
// permission of the volume before being mounted.
// Refer to the specific FSGroupPolicy values for additional details.
//
// This field is immutable.
// This field was immutable in Kubernetes < 1.29 and now is mutable.
//
// Defaults to ReadWriteOnceWithFSType, which will examine each volume
// to determine if Kubernetes should modify ownership and permissions of the volume.
@ -318,7 +318,7 @@ type CSIDriverSpec struct {
// deployed on such a cluster and the deployment determines which mode that is, for example
// via a command line parameter of the driver.
//
// This field is immutable.
// This field was immutable in Kubernetes < 1.29 and now is mutable.
//
// +optional
PodInfoOnMount *bool


@ -414,8 +414,6 @@ func ValidateCSIDriverUpdate(new, old *storage.CSIDriver) field.ErrorList {
// immutable fields should not be mutated.
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.AttachRequired, old.Spec.AttachRequired, field.NewPath("spec", "attachedRequired"))...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.FSGroupPolicy, old.Spec.FSGroupPolicy, field.NewPath("spec", "fsGroupPolicy"))...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.PodInfoOnMount, old.Spec.PodInfoOnMount, field.NewPath("spec", "podInfoOnMount"))...)
allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.VolumeLifecycleModes, old.Spec.VolumeLifecycleModes, field.NewPath("spec", "volumeLifecycleModes"))...)
allErrs = append(allErrs, validateTokenRequests(new.Spec.TokenRequests, field.NewPath("spec", "tokenRequests"))...)
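For reference, the deleted calls relied on the generic apimachinery helper, which rejects any change between the old and new value. A standalone sketch of that behavior (my illustration, not code from this commit):

// Sketch of the helper the removed lines called; dropping those calls for
// fsGroupPolicy and podInfoOnMount is what lets updates to them pass validation,
// while attachRequired and volumeLifecycleModes remain immutable.
package main

import (
	"fmt"

	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	oldPodInfo, newPodInfo := false, true
	// Before this commit, a CSIDriver update with a changed podInfoOnMount
	// effectively hit this check during validation.
	errs := apimachineryvalidation.ValidateImmutableField(&newPodInfo, &oldPodInfo, field.NewPath("spec", "podInfoOnMount"))
	fmt.Println(len(errs)) // 1 ("field is immutable")
}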


@ -1715,6 +1715,17 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
modify: func(new *storage.CSIDriver) {
new.Spec.SELinuxMount = &notSELinuxMount
},
}, {
name: "change PodInfoOnMount",
modify: func(new *storage.CSIDriver) {
new.Spec.PodInfoOnMount = &podInfoOnMount
},
}, {
name: "change FSGroupPolicy",
modify: func(new *storage.CSIDriver) {
fileFSGroupPolicy := storage.FileFSGroupPolicy
new.Spec.FSGroupPolicy = &fileFSGroupPolicy
},
}}
for _, test := range successCases {
t.Run(test.name, func(t *testing.T) {
@ -1755,11 +1766,6 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
modify: func(new *storage.CSIDriver) {
new.Spec.PodInfoOnMount = nil
},
}, {
name: "PodInfoOnMount changed",
modify: func(new *storage.CSIDriver) {
new.Spec.PodInfoOnMount = &podInfoOnMount
},
}, {
name: "invalid volume lifecycle mode",
modify: func(new *storage.CSIDriver) {
@ -1792,12 +1798,6 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
invalidFSGroupPolicy := storage.FSGroupPolicy("invalid")
new.Spec.FSGroupPolicy = &invalidFSGroupPolicy
},
}, {
name: "FSGroupPolicy changed",
modify: func(new *storage.CSIDriver) {
fileFSGroupPolicy := storage.FileFSGroupPolicy
new.Spec.FSGroupPolicy = &fileFSGroupPolicy
},
}, {
name: "TokenRequests invalidated",
modify: func(new *storage.CSIDriver) {


@ -83,10 +83,8 @@ func (csiDriverStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
newCSIDriver.Spec.SELinuxMount = nil
}
// Any changes to the mutable fields increment the generation number.
if !apiequality.Semantic.DeepEqual(oldCSIDriver.Spec.TokenRequests, newCSIDriver.Spec.TokenRequests) ||
!apiequality.Semantic.DeepEqual(oldCSIDriver.Spec.RequiresRepublish, newCSIDriver.Spec.RequiresRepublish) ||
!apiequality.Semantic.DeepEqual(oldCSIDriver.Spec.SELinuxMount, newCSIDriver.Spec.SELinuxMount) {
// Any changes to the spec increment the generation number.
if !apiequality.Semantic.DeepEqual(oldCSIDriver.Spec, newCSIDriver.Spec) {
newCSIDriver.Generation = oldCSIDriver.Generation + 1
}
}
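The strategy now compares the whole spec rather than an explicit list of mutable fields, so any spec change, including the newly mutable FSGroupPolicy and PodInfoOnMount, bumps metadata.generation. A minimal sketch of that rule, written against the versioned storage/v1 API rather than the internal types the real strategy uses:

// Illustrative only: the real logic lives in csiDriverStrategy.PrepareForUpdate.
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

// bumpGenerationOnSpecChange increments metadata.generation whenever any field
// of the spec differs, making the change observable to watching controllers.
func bumpGenerationOnSpecChange(oldDriver, newDriver *storagev1.CSIDriver) {
	if !apiequality.Semantic.DeepEqual(oldDriver.Spec, newDriver.Spec) {
		newDriver.Generation = oldDriver.Generation + 1
	}
}

func main() {
	podInfo := true
	oldDriver := &storagev1.CSIDriver{}
	newDriver := oldDriver.DeepCopy()
	newDriver.Spec.PodInfoOnMount = &podInfo

	bumpGenerationOnSpecChange(oldDriver, newDriver)
	fmt.Println(newDriver.Generation) // 1
}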


@ -88,7 +88,6 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
})
attachRequired := true
podInfoOnMount := true
driverWithNothing := &storage.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
@ -100,7 +99,6 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
},
Spec: storage.CSIDriverSpec{
AttachRequired: &attachRequired,
PodInfoOnMount: &podInfoOnMount,
VolumeLifecycleModes: []storage.VolumeLifecycleMode{
storage.VolumeLifecyclePersistent,
},
@ -109,6 +107,49 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
enabled := true
disabled := false
gcp := "gcp"
noneFsGroupPolicy := storage.NoneFSGroupPolicy
readWriteOnceWithFSTypeFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
fileFSGroupPolicy := storage.FileFSGroupPolicy
driverWithPodInfoOnMountEnabled := &storage.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: storage.CSIDriverSpec{
PodInfoOnMount: &enabled,
},
}
driverWithPodInfoOnMountDisabled := &storage.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: storage.CSIDriverSpec{
PodInfoOnMount: &disabled,
},
}
driverWithNoneFSGroupPolicy := &storage.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: storage.CSIDriverSpec{
FSGroupPolicy: &noneFsGroupPolicy,
},
}
driverWithReadWriteOnceWithFSTypeFSGroupPolicy := &storage.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: storage.CSIDriverSpec{
FSGroupPolicy: &readWriteOnceWithFSTypeFSGroupPolicy,
},
}
driverWithFileFSGroupPolicy := &storage.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: storage.CSIDriverSpec{
FSGroupPolicy: &fileFSGroupPolicy,
},
}
driverWithCapacityEnabled := &storage.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
@ -165,22 +206,91 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
wantSELinuxMount *bool
}{
{
name: "capacity feature enabled, before: none, update: enabled",
old: driverWithNothing,
update: driverWithCapacityEnabled,
wantCapacity: &enabled,
name: "podInfoOnMount feature enabled, before: none, update: enabled",
old: driverWithNothing,
update: driverWithPodInfoOnMountEnabled,
wantGeneration: 1,
},
{
name: "capacity feature enabled, before: enabled, update: disabled",
old: driverWithCapacityEnabled,
update: driverWithCapacityDisabled,
wantCapacity: &disabled,
name: "podInfoOnMount feature enabled, before: enabled, update: disabled",
old: driverWithPodInfoOnMountEnabled,
update: driverWithPodInfoOnMountDisabled,
wantGeneration: 1,
},
{
name: "inline feature enabled, before: none, update: persistent",
old: driverWithNothing,
update: driverWithPersistent,
wantModes: resultPersistent,
name: "fSGroupPolicy feature enabled, before: nil, update: none",
old: driverWithNothing,
update: driverWithNoneFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: nil, update: readWriteOnceWithFSType",
old: driverWithNothing,
update: driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: nil, update: file",
old: driverWithNothing,
update: driverWithFileFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: none, update: readWriteOnceWithFSType",
old: driverWithNoneFSGroupPolicy,
update: driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: none, update: file",
old: driverWithNoneFSGroupPolicy,
update: driverWithFileFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: readWriteOnceWithFSType, update: none",
old: driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
update: driverWithNoneFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: readWriteOnceWithFSType, update: file",
old: driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
update: driverWithFileFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: file, update: none",
old: driverWithFileFSGroupPolicy,
update: driverWithNoneFSGroupPolicy,
wantGeneration: 1,
},
{
name: "fSGroupPolicy feature enabled, before: file, update: readWriteOnceWithFSType",
old: driverWithFileFSGroupPolicy,
update: driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
wantGeneration: 1,
},
{
name: "capacity feature enabled, before: none, update: enabled",
old: driverWithNothing,
update: driverWithCapacityEnabled,
wantCapacity: &enabled,
wantGeneration: 1,
},
{
name: "capacity feature enabled, before: enabled, update: disabled",
old: driverWithCapacityEnabled,
update: driverWithCapacityDisabled,
wantCapacity: &disabled,
wantGeneration: 1,
},
{
name: "inline feature enabled, before: none, update: persistent",
old: driverWithNothing,
update: driverWithPersistent,
wantModes: resultPersistent,
wantGeneration: 1,
},
{
name: "service account token feature enabled, before: none, update: audience=gcp",


@ -306,7 +306,7 @@ type CSIDriverSpec struct {
// deployed on such a cluster and the deployment determines which mode that is, for example
// via a command line parameter of the driver.
//
// This field is immutable.
// This field was immutable in Kubernetes < 1.29 and now is mutable.
//
// +optional
PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"`
@ -353,7 +353,7 @@ type CSIDriverSpec struct {
// permission of the volume before being mounted.
// Refer to the specific FSGroupPolicy values for additional details.
//
// This field is immutable.
// This field was immutable in Kubernetes < 1.29 and now is mutable.
//
// Defaults to ReadWriteOnceWithFSType, which will examine each volume
// to determine if Kubernetes should modify ownership and permissions of the volume.


@ -248,6 +248,17 @@ func (m *mockDriverSetup) cleanup(ctx context.Context) {
framework.ExpectNoError(err, "while cleaning up after test")
}
func (m *mockDriverSetup) update(o utils.PatchCSIOptions) {
item, err := m.cs.StorageV1().CSIDrivers().Get(context.TODO(), m.config.GetUniqueDriverName(), metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
err = utils.PatchCSIDeployment(nil, o, item)
framework.ExpectNoError(err, "Failed to apply %v to CSIDriver object %v", o, m.config.GetUniqueDriverName())
_, err = m.cs.StorageV1().CSIDrivers().Update(context.TODO(), item, metav1.UpdateOptions{})
framework.ExpectNoError(err, "Failed to update CSIDriver %v", m.config.GetUniqueDriverName())
}
func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
ginkgo.By("Creating pod")
f := m.f
@ -722,8 +733,8 @@ func checkPodLogs(ctx context.Context, getCalls func(ctx context.Context) ([]dri
switch call.Method {
case "NodePublishVolume":
numNodePublishVolume++
if numNodePublishVolume == 1 {
// Check that NodePublish had expected attributes for first volume
if numNodePublishVolume == expectedNumNodePublish {
// Check that NodePublish had expected attributes for the last of the expected volumes
for k, v := range expectedAttributes {
vv, found := call.Request.VolumeContext[k]
if found && (v == vv || (v == "<nonempty>" && len(vv) != 0)) {


@ -21,9 +21,9 @@ import (
"fmt"
"math/rand"
"strconv"
"time"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -74,47 +74,67 @@ var _ = utils.SIGDescribe("CSI Mock volume fsgroup policies", func() {
})
ginkgo.DeferCleanup(m.cleanup)
// kube-scheduler may need some time before it gets the CSIDriver object.
// Without them, scheduling doesn't run as expected by the test.
syncDelay := 5 * time.Second
time.Sleep(syncDelay)
waitUtilFSGroupInPod(ctx, m, test.modified)
fsGroupVal := int64(rand.Int63n(20000) + 1024)
fsGroup := &fsGroupVal
// The created resources will be removed by the cleanup() function,
// so no need to delete anything here.
})
}
})
_, _, pod := m.createPodWithFSGroup(ctx, fsGroup) /* persistent volume */
mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath
dirName := mountPath + "/" + f.UniqueName
fileName := dirName + "/" + f.UniqueName
err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "failed to start pod")
// Create the subdirectory to ensure that fsGroup propagates
createDirectory := fmt.Sprintf("mkdir %s", dirName)
_, _, err = e2evolume.PodExec(f, pod, createDirectory)
framework.ExpectNoError(err, "failed: creating the directory: %s", err)
// Inject the contents onto the mount
createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
_, _, err = e2evolume.PodExec(f, pod, createFile)
framework.ExpectNoError(err, "failed: writing the contents: %s", err)
// Delete the created file. This step is mandatory, as the mock driver
// won't clean up the contents automatically.
defer func() {
deleteDir := fmt.Sprintf("rm -fr %s", dirName)
_, _, err = e2evolume.PodExec(f, pod, deleteDir)
framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
}()
// Ensure that the fsGroup matches what we expect
if test.modified {
utils.VerifyFSGroupInPod(f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
} else {
utils.VerifyFSGroupInPod(f, fileName, "root", pod)
ginkgo.Context("CSI FSGroupPolicy Update [LinuxOnly]", func() {
tests := []struct {
name string
oldFSGroupPolicy storagev1.FSGroupPolicy
newFSGroupPolicy storagev1.FSGroupPolicy
}{
{
name: "should update fsGroup if update from None to File",
oldFSGroupPolicy: storagev1.NoneFSGroupPolicy,
newFSGroupPolicy: storagev1.FileFSGroupPolicy,
},
{
name: "should update fsGroup if update from None to default",
oldFSGroupPolicy: storagev1.NoneFSGroupPolicy,
newFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
},
{
name: "should not update fsGroup if update from File to None",
oldFSGroupPolicy: storagev1.FileFSGroupPolicy,
newFSGroupPolicy: storagev1.NoneFSGroupPolicy,
},
{
name: "should update fsGroup if update from File to default",
oldFSGroupPolicy: storagev1.FileFSGroupPolicy,
newFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
},
{
name: "should not update fsGroup if update from detault to None",
oldFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
newFSGroupPolicy: storagev1.NoneFSGroupPolicy,
},
{
name: "should update fsGroup if update from detault to File",
oldFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
newFSGroupPolicy: storagev1.FileFSGroupPolicy,
},
}
for _, t := range tests {
test := t
ginkgo.It(test.name, func(ctx context.Context) {
if framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping")
}
m.init(ctx, testParameters{
disableAttach: true,
registerDriver: true,
fsGroupPolicy: &test.oldFSGroupPolicy,
})
ginkgo.DeferCleanup(m.cleanup)
waitUtilFSGroupInPod(ctx, m, test.oldFSGroupPolicy != storagev1.NoneFSGroupPolicy)
m.update(utils.PatchCSIOptions{FSGroupPolicy: &test.newFSGroupPolicy})
waitUtilFSGroupInPod(ctx, m, test.newFSGroupPolicy != storagev1.NoneFSGroupPolicy)
// The created resources will be removed by the cleanup() function,
// so no need to delete anything here.
@ -122,3 +142,56 @@ var _ = utils.SIGDescribe("CSI Mock volume fsgroup policies", func() {
}
})
})
func waitUtilFSGroupInPod(ctx context.Context, m *mockDriverSetup, modified bool) {
var err error
utils.WaitUntil(framework.Poll, framework.PodStartTimeout, func() bool {
err = gomega.InterceptGomegaFailure(func() {
fsGroupVal := int64(rand.Int63n(20000) + 1024)
fsGroup := &fsGroupVal
_, _, pod := m.createPodWithFSGroup(ctx, fsGroup) /* persistent volume */
defer func() {
err = e2epod.DeletePodWithWait(context.TODO(), m.f.ClientSet, pod)
framework.ExpectNoError(err, "failed: deleting the pod: %s", err)
}()
mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath
dirName := mountPath + "/" + m.f.UniqueName
fileName := dirName + "/" + m.f.UniqueName
err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "failed to start pod")
// Create the subdirectory to ensure that fsGroup propagates
createDirectory := fmt.Sprintf("mkdir %s", dirName)
_, _, err = e2evolume.PodExec(m.f, pod, createDirectory)
framework.ExpectNoError(err, "failed: creating the directory: %s", err)
// Inject the contents onto the mount
createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
_, _, err = e2evolume.PodExec(m.f, pod, createFile)
framework.ExpectNoError(err, "failed: writing the contents: %s", err)
// Delete the created file. This step is mandatory, as the mock driver
// won't clean up the contents automatically.
defer func() {
deleteDir := fmt.Sprintf("rm -fr %s", dirName)
_, _, err = e2evolume.PodExec(m.f, pod, deleteDir)
framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
}()
// Ensure that the fsGroup matches what we expect
if modified {
utils.VerifyFSGroupInPod(m.f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
} else {
utils.VerifyFSGroupInPod(m.f, fileName, "root", pod)
}
})
return err == nil
})
framework.ExpectNoError(err, "failed: verifing fsgroup in pod: %s", err)
}


@ -20,6 +20,7 @@ import (
"context"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
@ -34,7 +35,6 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
m := newMockDriverSetup(f)
ginkgo.Context("CSI workload information using mock driver", func() {
var (
err error
podInfoTrue = true
podInfoFalse = false
)
@ -89,36 +89,99 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
ginkgo.DeferCleanup(m.cleanup)
withVolume := pvcReference
if test.expectEphemeral {
withVolume = csiEphemeral
}
_, _, pod := m.createPod(ctx, withVolume)
if pod == nil {
return
}
err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)
waitUntilPodInfoInLog(ctx, m, test.expectPodInfo, test.expectEphemeral, 1)
// If we expect an ephemeral volume, the feature has to be enabled.
// Otherwise need to check if we expect pod info, because the content
// of that depends on whether the feature is enabled or not.
csiInlineVolumesEnabled := test.expectEphemeral
if test.expectPodInfo {
ginkgo.By("checking for CSIInlineVolumes feature")
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(ctx, m.cs, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
}
})
}
})
ginkgo.By("Deleting the previously created pod")
err = e2epod.DeletePodWithWait(ctx, m.cs, pod)
framework.ExpectNoError(err, "while deleting")
ginkgo.Context("CSI PodInfoOnMount Update", func() {
var (
podInfoTrue = true
podInfoFalse = false
)
tests := []struct {
name string
oldPodInfoOnMount *bool
newPodInfoOnMount *bool
}{
{
name: "should not be passed when update from true to false",
oldPodInfoOnMount: &podInfoTrue,
newPodInfoOnMount: &podInfoFalse,
},
{
name: "should not be passed when update from true to nil",
oldPodInfoOnMount: &podInfoTrue,
newPodInfoOnMount: nil,
},
{
name: "should be passed when update from false to true",
oldPodInfoOnMount: &podInfoFalse,
newPodInfoOnMount: &podInfoTrue,
},
{
name: "should be passed when update from nil to true",
oldPodInfoOnMount: nil,
newPodInfoOnMount: &podInfoTrue,
},
}
for _, t := range tests {
test := t
ginkgo.It(t.name, func(ctx context.Context) {
m.init(ctx, testParameters{
registerDriver: true,
podInfo: test.oldPodInfoOnMount})
ginkgo.DeferCleanup(m.cleanup)
waitUntilPodInfoInLog(ctx, m, test.oldPodInfoOnMount != nil && *test.oldPodInfoOnMount, false, 1)
m.update(utils.PatchCSIOptions{PodInfo: test.newPodInfoOnMount})
waitUntilPodInfoInLog(ctx, m, test.newPodInfoOnMount != nil && *test.newPodInfoOnMount, false, 2)
ginkgo.By("Checking CSI driver logs")
err = checkPodLogs(ctx, m.driver.GetCalls, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled, false, 1)
framework.ExpectNoError(err)
})
}
})
})
func waitUntilPodInfoInLog(ctx context.Context, m *mockDriverSetup, expectPodInfo, expectEphemeral bool, expectedNumNodePublish int) {
var err error
utils.WaitUntil(framework.Poll, framework.PodStartTimeout, func() bool {
err = gomega.InterceptGomegaFailure(func() {
withVolume := pvcReference
if expectEphemeral {
withVolume = csiEphemeral
}
_, _, pod := m.createPod(ctx, withVolume)
if pod == nil {
return
}
err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
framework.ExpectNoError(err, "Failed to start pod: %v", err)
// If we expect an ephemeral volume, the feature has to be enabled.
// Otherwise need to check if we expect pod info, because the content
// of that depends on whether the feature is enabled or not.
csiInlineVolumesEnabled := expectEphemeral
if expectPodInfo {
ginkgo.By("checking for CSIInlineVolumes feature")
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(ctx, m.cs, m.f.Timeouts, m.f.Namespace.Name)
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
}
ginkgo.By("Deleting the previously created pod")
err = e2epod.DeletePodWithWait(ctx, m.cs, pod)
framework.ExpectNoError(err, "while deleting")
ginkgo.By("Checking CSI driver logs")
err = checkPodLogs(ctx, m.driver.GetCalls, pod, expectPodInfo, expectEphemeral, csiInlineVolumesEnabled, false, expectedNumNodePublish)
framework.ExpectNoError(err)
})
return err == nil
})
framework.ExpectNoError(err, "failed: verifing PodInfo: %s", err)
}