Merge pull request #105659 from pohly/generic-ephemeral-volume-test

generic ephemeral volume E2E tests
Kubernetes Prow Robot 2021-10-20 15:16:04 -07:00 committed by GitHub
commit 656f3752d2
5 changed files with 359 additions and 186 deletions

View File

@@ -314,6 +314,14 @@ var (
		SnapshotDeletionPolicy: DeleteSnapshot,
		VolType: DynamicPV,
	}
+	// EphemeralSnapshotDelete is TestPattern for snapshotting of a generic ephemeral volume
+	// where snapshots are deleted.
+	EphemeralSnapshotDelete = TestPattern{
+		Name: "Ephemeral Snapshot (delete policy)",
+		SnapshotType: DynamicCreatedSnapshot,
+		SnapshotDeletionPolicy: DeleteSnapshot,
+		VolType: GenericEphemeralVolume,
+	}
	// DynamicSnapshotRetain is TestPattern for "Dynamic snapshot"
	DynamicSnapshotRetain = TestPattern{
		Name: "Dynamic Snapshot (retain policy)",
@@ -328,6 +336,14 @@
		SnapshotDeletionPolicy: RetainSnapshot,
		VolType: DynamicPV,
	}
+	// EphemeralSnapshotRetain is TestPattern for snapshotting of a generic ephemeral volume
+	// where snapshots are preserved.
+	EphemeralSnapshotRetain = TestPattern{
+		Name: "Ephemeral Snapshot (retain policy)",
+		SnapshotType: DynamicCreatedSnapshot,
+		SnapshotDeletionPolicy: RetainSnapshot,
+		VolType: GenericEphemeralVolume,
+	}
	// Definitions for volume expansion case
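
For orientation, the GenericEphemeralVolume patterns added above exercise volumes that are declared inline in the pod spec instead of through a pre-created PVC. A minimal sketch of such a volume source follows; the package name, volume name, and size are illustrative and not taken from the test framework, and the field layout follows the core/v1 API of this Kubernetes era:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// ephemeralScratchVolume returns a pod volume backed by a generic ephemeral
// volume: the cluster creates a PVC named "<pod name>-<volume name>" from the
// embedded template and deletes it together with the pod.
func ephemeralScratchVolume() v1.Volume {
	return v1.Volume{
		Name: "scratch",
		VolumeSource: v1.VolumeSource{
			Ephemeral: &v1.EphemeralVolumeSource{
				VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
					Spec: v1.PersistentVolumeClaimSpec{
						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
						Resources: v1.ResourceRequirements{
							Requests: v1.ResourceList{
								v1.ResourceStorage: resource.MustParse("1Gi"),
							},
						},
					},
				},
			},
		},
	}
}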

View File

@@ -698,6 +698,16 @@ func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns,
// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
+	return StartInPodWithVolumeSource(c, v1.VolumeSource{
+		PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+			ClaimName: claimName,
+		},
+	}, ns, podName, command, node)
+}
+
+// StartInPodWithVolumeSource starts a command in a pod with given volume mounted to /mnt directory
+// The caller is responsible for checking the pod and deleting it.
+func StartInPodWithVolumeSource(c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod {
	pod := &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind: "Pod",
@@ -727,12 +737,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
			Volumes: []v1.Volume{
				{
					Name: "my-volume",
-					VolumeSource: v1.VolumeSource{
-						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-							ClaimName: claimName,
-							ReadOnly: false,
-						},
-					},
+					VolumeSource: volSrc,
				},
			},
		},
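
With this refactor any v1.VolumeSource can be handed to the helper, which is what lets the suites below run their workloads on generic ephemeral volumes. A rough usage sketch, assuming the signature shown in this diff; claimSpec is a hypothetical *v1.PersistentVolumeClaimSpec, cs and ns are the usual clientset and namespace variables, and this is a fragment rather than a complete test:

// Start a throwaway pod whose volume is a generic ephemeral volume; the PVC is
// created from the inline template and goes away with the pod.
volSrc := v1.VolumeSource{
	Ephemeral: &v1.EphemeralVolumeSource{
		VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
			Spec: *claimSpec,
		},
	},
}
pod := StartInPodWithVolumeSource(cs, volSrc, ns, "ephemeral-tester", "sleep 300", e2epod.NodeSelection{})
defer StopPod(cs, pod) // the caller is responsible for deleting the pod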

View File

@ -29,9 +29,9 @@ import (
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
@@ -75,6 +75,8 @@ func InitSnapshottableTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.DynamicSnapshotDelete,
		storageframework.DynamicSnapshotRetain,
+		storageframework.EphemeralSnapshotDelete,
+		storageframework.EphemeralSnapshotRetain,
		storageframework.PreprovisionedSnapshotDelete,
		storageframework.PreprovisionedSnapshotRetain,
	}
@@ -116,6 +118,8 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
		dc dynamic.Interface
		pvc *v1.PersistentVolumeClaim
		sc *storagev1.StorageClass
+		volumeResource *storageframework.VolumeResource
+		pod *v1.Pod
		claimSize string
		originalMntTestData string
	)
@@ -131,35 +135,212 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
		config, driverCleanup = driver.PrepareTest(f)
		cleanupSteps = append(cleanupSteps, driverCleanup)
-		var volumeResource *storageframework.VolumeResource
		cleanupSteps = append(cleanupSteps, func() {
			framework.ExpectNoError(volumeResource.CleanupResource())
		})
		volumeResource = storageframework.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
-		pvc = volumeResource.Pvc
-		sc = volumeResource.Sc
-		claimSize = pvc.Spec.Resources.Requests.Storage().String()
		ginkgo.By("[init] starting a pod to use the claim")
-		originalMntTestData = fmt.Sprintf("hello from %s namespace", pvc.GetNamespace())
+		originalMntTestData = fmt.Sprintf("hello from %s namespace", f.Namespace.Name)
		command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)
-		pod := RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
+		pod = StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
+		cleanupSteps = append(cleanupSteps, func() {
+			e2epod.DeletePodWithWait(cs, pod)
+		})
-		err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
-		framework.ExpectNoError(err)
+		// At this point a pod is running with a PVC. How to proceed depends on which test is running.
+	}
+	cleanup := func() {
+		// Don't register an AfterEach then a cleanup step because the order
+		// of execution will do the AfterEach first then the cleanup step.
+		// Also AfterEach cleanup registration is not fine grained enough
+		// Adding to the cleanup steps allows you to register cleanup only when it is needed
+		// Ideally we could replace this with https://golang.org/pkg/testing/#T.Cleanup
+		// Depending on how far the test executed, cleanup accordingly
+		// Execute in reverse order, similar to defer stack
+		for i := len(cleanupSteps) - 1; i >= 0; i-- {
+			err := storageutils.TryFunc(cleanupSteps[i])
+			framework.ExpectNoError(err, "while running cleanup steps")
+		}
+	}
+	ginkgo.AfterEach(func() {
+		cleanup()
+	})
+	ginkgo.Context("", func() {
+		ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func() {
+			if pattern.VolType != storageframework.GenericEphemeralVolume {
+				e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType)
+			}
+			init()
+			// We can test snapshotting of generic
+			// ephemeral volumes by creating the snapshot
+			// while the pod is running (online). We cannot do it after pod deletion,
+			// because then the PVC also gets marked and snapshotting no longer works
+			// (even when a finalizer prevents actual removal of the PVC).
+			//
+			// Because data consistency cannot be
+			// guaranteed, this flavor of the test doesn't
+			// check the content of the snapshot.
+			framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
+			pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+			framework.ExpectNoError(err, "check pod after it terminated")
			// Get new copy of the claim
			ginkgo.By("[init] checking the claim")
-			pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
+			pvcName := ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])
+			pvcNamespace := pod.Namespace
+			parameters := map[string]string{}
+			sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
+			cleanupSteps = append(cleanupSteps, func() {
+				framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
+			})
+			vs := sr.Vs
+			vsc := sr.Vsclass
+			err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
			framework.ExpectNoError(err)
+			pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
+			framework.ExpectNoError(err, "get PVC")
+			claimSize = pvc.Spec.Resources.Requests.Storage().String()
+			sc = volumeResource.Sc
+			// Get the bound PV
+			ginkgo.By("[init] checking the PV")
+			_, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+			// Get new copy of the snapshot
+			ginkgo.By("checking the snapshot")
+			vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
+			framework.ExpectNoError(err)
+			// Get the bound snapshotContent
+			snapshotStatus := vs.Object["status"].(map[string]interface{})
+			snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
+			vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+			snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
+			volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})
+			var restoredPVC *v1.PersistentVolumeClaim
+			var restoredPod *v1.Pod
+			// Check SnapshotContent properties
+			ginkgo.By("checking the SnapshotContent")
+			// PreprovisionedCreatedSnapshot do not need to set volume snapshot class name
+			if pattern.SnapshotType != storageframework.PreprovisionedCreatedSnapshot {
+				framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], vsc.GetName())
+			}
+			framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
+			framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())
+			ginkgo.By("creating a pvc from the snapshot")
+			restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
+				ClaimSize: claimSize,
+				StorageClassName: &(sc.Name),
+			}, config.Framework.Namespace.Name)
+			group := "snapshot.storage.k8s.io"
+			restoredPVC.Spec.DataSource = &v1.TypedLocalObjectReference{
+				APIGroup: &group,
+				Kind: "VolumeSnapshot",
+				Name: vs.GetName(),
+			}
+			ginkgo.By("starting a pod to use the snapshot")
+			volSrc := v1.VolumeSource{
+				Ephemeral: &v1.EphemeralVolumeSource{
+					VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
+						Spec: restoredPVC.Spec,
+					},
+				},
+			}
+			restoredPod = StartInPodWithVolumeSource(cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
+			cleanupSteps = append(cleanupSteps, func() {
+				StopPod(cs, restoredPod)
+			})
+			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
+			if pattern.VolType != storageframework.GenericEphemeralVolume {
+				commands := e2evolume.GenerateReadFileCmd(datapath)
+				_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
+				framework.ExpectNoError(err)
+			}
+			ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
+			// Delete both Snapshot and restored Pod/PVC at the same time because different storage systems
+			// have different ordering of deletion. Some may require delete the restored PVC first before
+			// Snapshot deletion and some are opposite.
+			err = storageutils.DeleteSnapshotWithoutWaiting(dc, vs.GetNamespace(), vs.GetName())
+			framework.ExpectNoError(err)
+			// Wait for the Snapshot to be actually deleted from API server
+			err = storageutils.WaitForNamespacedGVRDeletion(dc, storageutils.SnapshotGVR, vs.GetNamespace(), vs.GetNamespace(), framework.Poll, f.Timeouts.SnapshotDelete)
+			framework.ExpectNoError(err)
+			switch pattern.SnapshotDeletionPolicy {
+			case storageframework.DeleteSnapshot:
+				ginkgo.By("checking the SnapshotContent has been deleted")
+				err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
+				framework.ExpectNoError(err)
+			case storageframework.RetainSnapshot:
+				ginkgo.By("checking the SnapshotContent has not been deleted")
+				err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
+				framework.ExpectError(err)
+			}
+		})
+		ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func() {
+			if pattern.VolType == storageframework.GenericEphemeralVolume {
+				e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType)
+			}
+			init()
+			framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
+			pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+			framework.ExpectNoError(err, "check pod after it terminated")
+			// Get new copy of the claim
+			ginkgo.By("[init] checking the claim")
+			pvcName := volumeResource.Pvc.Name
+			pvcNamespace := volumeResource.Pvc.Namespace
+			parameters := map[string]string{}
+			sr := storageframework.CreateSnapshotResource(sDriver, config, pattern, pvcName, pvcNamespace, f.Timeouts, parameters)
+			cleanupSteps = append(cleanupSteps, func() {
+				framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
+			})
+			vs := sr.Vs
+			vsc := sr.Vsclass
+			err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvcNamespace, pvcName, framework.Poll, f.Timeouts.ClaimProvision)
+			framework.ExpectNoError(err)
+			pvc, err = cs.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
+			framework.ExpectNoError(err, "get PVC")
+			claimSize = pvc.Spec.Resources.Requests.Storage().String()
+			sc = volumeResource.Sc
			// Get the bound PV
			ginkgo.By("[init] checking the PV")
			pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
			framework.ExpectNoError(err)
+			ginkgo.By("[init] deleting the pod")
+			StopPod(cs, pod)
			// At this point we know that:
			// - a pod was created with a PV that's supposed to have data
			//
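
The ephemeral It block above looks up the automatically created claim with ephemeral.VolumeClaimName from k8s.io/component-helpers/storage/ephemeral. As a rough sketch of the naming rule that helper encodes (the function name below is hypothetical and assumes the usual core/v1 import; the component-helpers package is the authoritative implementation):

// claimNameFor mirrors the "<pod name>-<volume name>" convention for the PVC
// that the control plane creates for a generic ephemeral volume.
func claimNameFor(pod *v1.Pod, vol *v1.Volume) string {
	return pod.Name + "-" + vol.Name
}

// Example: the "pvc-snapshottable-tester" pod with its "my-volume" volume
// yields a PVC named "pvc-snapshottable-tester-my-volume".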
@@ -201,51 +382,10 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
				}
				return true
			})
-			framework.ExpectEqual(success, true)
+			if !success {
+				framework.Failf("timed out waiting for node=%s to not use the volume=%s", nodeName, volumeName)
+			}
		}
-	cleanup := func() {
-		// Don't register an AfterEach then a cleanup step because the order
-		// of execution will do the AfterEach first then the cleanup step.
-		// Also AfterEach cleanup registration is not fine grained enough
-		// Adding to the cleanup steps allows you to register cleanup only when it is needed
-		// Ideally we could replace this with https://golang.org/pkg/testing/#T.Cleanup
-		// Depending on how far the test executed, cleanup accordingly
-		// Execute in reverse order, similar to defer stack
-		for i := len(cleanupSteps) - 1; i >= 0; i-- {
-			err := storageutils.TryFunc(cleanupSteps[i])
-			framework.ExpectNoError(err, "while running cleanup steps")
-		}
-	}
-	ginkgo.BeforeEach(func() {
-		init()
-	})
-	ginkgo.AfterEach(func() {
-		cleanup()
-	})
-	ginkgo.Context("", func() {
-		var (
-			vs        *unstructured.Unstructured
-			vscontent *unstructured.Unstructured
-			vsc       *unstructured.Unstructured
-		)
-		ginkgo.BeforeEach(func() {
-			var sr *storageframework.SnapshotResource
-			cleanupSteps = append(cleanupSteps, func() {
-				framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
-			})
-			parameters := map[string]string{}
-			sr = storageframework.CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts, parameters)
-			vs = sr.Vs
-			vscontent = sr.Vscontent
-			vsc = sr.Vsclass
-		})
-		ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion", func() {
			// Get new copy of the snapshot
			ginkgo.By("checking the snapshot")
			vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
@@ -254,12 +394,15 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
			// Get the bound snapshotContent
			snapshotStatus := vs.Object["status"].(map[string]interface{})
			snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
-			vscontent, err = dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
+			vscontent, err := dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
			framework.ExpectNoError(err)
			snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
			volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{})
+			var restoredPVC *v1.PersistentVolumeClaim
+			var restoredPod *v1.Pod
			// Check SnapshotContent properties
			ginkgo.By("checking the SnapshotContent")
			// PreprovisionedCreatedSnapshot do not need to set volume snapshot class name
@@ -270,8 +413,6 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
			framework.ExpectEqual(volumeSnapshotRef["namespace"], vs.GetNamespace())
			ginkgo.By("Modifying source data test")
-			var restoredPVC *v1.PersistentVolumeClaim
-			var restoredPod *v1.Pod
			modifiedMntTestData := fmt.Sprintf("modified data from %s namespace", pvc.GetNamespace())
			ginkgo.By("modifying the data in the source PVC")
@@ -304,16 +445,17 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
				}
			})
-			ginkgo.By("starting a pod to use the claim")
+			ginkgo.By("starting a pod to use the snapshot")
			restoredPod = StartInPodWithVolume(cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
			cleanupSteps = append(cleanupSteps, func() {
				StopPod(cs, restoredPod)
			})
			framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
+			if pattern.VolType != storageframework.GenericEphemeralVolume {
				commands := e2evolume.GenerateReadFileCmd(datapath)
				_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
				framework.ExpectNoError(err)
+			}
			ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")

View File

@@ -32,6 +32,7 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/component-helpers/storage/ephemeral"
	migrationplugins "k8s.io/csi-translation-lib/plugins" // volume plugin names are exported nicely there
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/test/e2e/framework"
@@ -74,6 +75,7 @@ func InitCustomVolumeLimitsTestSuite(patterns []storageframework.TestPattern) st
func InitVolumeLimitsTestSuite() storageframework.TestSuite {
	patterns := []storageframework.TestPattern{
		storageframework.FsVolModeDynamicPV,
+		storageframework.DefaultFsGenericEphemeralVolume,
	}
	return InitCustomVolumeLimitsTestSuite(patterns)
}
@@ -95,14 +97,14 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
		// VolumeResource contains pv, pvc, sc, etc. of the first pod created
		resource *storageframework.VolumeResource
-		// All created PVCs, incl. the one in resource
-		pvcs []*v1.PersistentVolumeClaim
+		// All created PVCs
+		pvcNames []string
+		// All created Pods
+		podNames []string
		// All created PVs, incl. the one in resource
		pvNames sets.String
-		runningPod *v1.Pod
-		unschedulablePod *v1.Pod
	}
	var (
		l local
@@ -164,12 +166,23 @@
			framework.ExpectNoError(err, "while cleaning up resource")
		}()
		defer func() {
-			cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
+			cleanupTest(l.cs, l.ns.Name, l.podNames, l.pvcNames, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
		}()
-		// Create <limit> PVCs for one gigantic pod.
-		ginkgo.By(fmt.Sprintf("Creating %d PVC(s)", limit))
+		selection := e2epod.NodeSelection{Name: nodeName}
+		if pattern.VolType == storageframework.GenericEphemeralVolume {
+			// Create <limit> Pods.
+			ginkgo.By(fmt.Sprintf("Creating %d Pod(s) with one volume each", limit))
+			for i := 0; i < limit; i++ {
+				pod := StartInPodWithVolumeSource(l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", "sleep 1000000", selection)
+				l.podNames = append(l.podNames, pod.Name)
+				l.pvcNames = append(l.pvcNames, ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0]))
+			}
+		} else {
+			// Create <limit> PVCs for one gigantic pod.
+			var pvcs []*v1.PersistentVolumeClaim
+			ginkgo.By(fmt.Sprintf("Creating %d PVC(s)", limit))
			for i := 0; i < limit; i++ {
				pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
					ClaimSize: claimSize,
@@ -177,44 +190,40 @@
				}, l.ns.Name)
				pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), pvc, metav1.CreateOptions{})
				framework.ExpectNoError(err)
-				l.pvcs = append(l.pvcs, pvc)
+				l.pvcNames = append(l.pvcNames, pvc.Name)
+				pvcs = append(pvcs, pvc)
			}
			ginkgo.By("Creating pod to use all PVC(s)")
-			selection := e2epod.NodeSelection{Name: nodeName}
			podConfig := e2epod.Config{
				NS: l.ns.Name,
-				PVCs: l.pvcs,
+				PVCs: pvcs,
				SeLinuxLabel: e2epv.SELinuxLabel,
				NodeSelection: selection,
			}
			pod, err := e2epod.MakeSecPod(&podConfig)
			framework.ExpectNoError(err)
-			l.runningPod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
+			pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
			framework.ExpectNoError(err)
+			l.podNames = append(l.podNames, pod.Name)
+		}
		ginkgo.By("Waiting for all PVCs to get Bound")
-		l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.pvcs)
+		l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.ns.Name, l.pvcNames)
		framework.ExpectNoError(err)
-		ginkgo.By("Waiting for the pod Running")
+		ginkgo.By("Waiting for the pod(s) running")
-		err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, l.runningPod.Name, l.ns.Name, testSlowMultiplier*f.Timeouts.PodStart)
+		for _, podName := range l.podNames {
+			err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, podName, l.ns.Name, testSlowMultiplier*f.Timeouts.PodStart)
			framework.ExpectNoError(err)
+		}
		ginkgo.By("Creating an extra pod with one volume to exceed the limit")
-		podConfig = e2epod.Config{
-			NS: l.ns.Name,
-			PVCs: []*v1.PersistentVolumeClaim{l.resource.Pvc},
-			SeLinuxLabel: e2epv.SELinuxLabel,
-			NodeSelection: selection,
-		}
-		pod, err = e2epod.MakeSecPod(&podConfig)
-		framework.ExpectNoError(err)
-		l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
-		framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")
+		pod := StartInPodWithVolumeSource(l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", "sleep 10000", selection)
+		l.podNames = append(l.podNames, pod.Name)
		ginkgo.By("Waiting for the pod to get unschedulable with the right message")
-		err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
+		err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, pod.Name, "Unschedulable", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
			if pod.Status.Phase == v1.PodPending {
				reg, err := regexp.Compile(`max.+volume.+count`)
				if err != nil {
@@ -270,24 +279,18 @@
	})
}
-func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String, timeout time.Duration) error {
+func cleanupTest(cs clientset.Interface, ns string, podNames, pvcNames []string, pvNames sets.String, timeout time.Duration) error {
	var cleanupErrors []string
-	if runningPodName != "" {
-		err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, metav1.DeleteOptions{})
+	for _, podName := range podNames {
+		err := cs.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{})
		if err != nil {
-			cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete pod %s: %s", runningPodName, err))
+			cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete pod %s: %s", podName, err))
		}
	}
-	if unschedulablePodName != "" {
-		err := cs.CoreV1().Pods(ns).Delete(context.TODO(), unschedulablePodName, metav1.DeleteOptions{})
-		if err != nil {
-			cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete pod %s: %s", unschedulablePodName, err))
-		}
-	}
-	for _, pvc := range pvcs {
-		err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
-		if err != nil {
-			cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete PVC %s: %s", pvc.Name, err))
+	for _, pvcName := range pvcNames {
+		err := cs.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{})
+		if !apierrors.IsNotFound(err) {
+			cleanupErrors = append(cleanupErrors, fmt.Sprintf("failed to delete PVC %s: %s", pvcName, err))
		}
	}
	// Wait for the PVs to be deleted. It includes also pod and PVC deletion because of PVC protection.
@@ -323,12 +326,12 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl
}
// waitForAllPVCsBound waits until the given PVCs are all bound. It then returns the bound PVC names as a set.
-func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, pvcs []*v1.PersistentVolumeClaim) (sets.String, error) {
+func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, ns string, pvcNames []string) (sets.String, error) {
	pvNames := sets.NewString()
	err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
		unbound := 0
-		for _, pvc := range pvcs {
-			pvc, err := cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
+		for _, pvcName := range pvcNames {
+			pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), pvcName, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
@@ -339,7 +342,7 @@ func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, pvcs []*
			}
		}
		if unbound > 0 {
-			framework.Logf("%d/%d of PVCs are Bound", pvNames.Len(), len(pvcs))
+			framework.Logf("%d/%d of PVCs are Bound", pvNames.Len(), len(pvcNames))
			return false, nil
		}
		return true, nil
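
The extra pod in the volume-limits test above is expected to stay Pending because the node's volume limit is exhausted; the test greps the scheduler's message with the regular expression max.+volume.+count. A hedged sketch of what such a check can look like inside a condition callback; this is an illustration only, not the test's exact body, which the hunk above truncates:

// isVolumeLimitUnschedulable reports whether the pod is unschedulable for
// volume-limit reasons: PodScheduled is False with reason "Unschedulable"
// and a message mentioning the max volume count.
func isVolumeLimitUnschedulable(pod *v1.Pod) bool {
	re := regexp.MustCompile(`max.+volume.+count`)
	for _, cond := range pod.Status.Conditions {
		if cond.Type == v1.PodScheduled && cond.Status == v1.ConditionFalse &&
			cond.Reason == "Unschedulable" && re.MatchString(cond.Message) {
			return true
		}
	}
	return false
}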

View File

@@ -30,6 +30,7 @@ import (
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/component-base/metrics/testutil"
+	"k8s.io/component-helpers/storage/ephemeral"
	kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
	"k8s.io/kubernetes/test/e2e/framework"
	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
@@ -211,14 +212,15 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
		verifyMetricCount(storageOpMetrics, updatedStorageMetrics, "volume_provision", true)
	}
-	filesystemMode := func(ephemeral bool) {
-		if !ephemeral {
+	filesystemMode := func(isEphemeral bool) {
+		pvcName := pvc.Name
+		if !isEphemeral {
			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			framework.ExpectNotEqual(pvc, nil)
		}
-		pod := makePod(ns, pvc, ephemeral)
+		pod := makePod(ns, pvc, isEphemeral)
		pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
@@ -228,6 +230,11 @@
		pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
+		if isEphemeral {
+			pvcName = ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])
+		}
+		pvcNamespace := pod.Namespace
		// Verify volume stat metrics were collected for the referenced PVC
		volumeStatKeys := []string{
			kubeletmetrics.VolumeStatsUsedBytesKey,
@@ -251,31 +258,31 @@
				framework.Logf("Error fetching kubelet metrics")
				return false, err
			}
-			if !findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics) {
+			if !findVolumeStatMetric(kubeletKeyName, pvcNamespace, pvcName, kubeMetrics) {
				return false, nil
			}
			return true, nil
		})
-		framework.ExpectNoError(waitErr, "Unable to find metric %s for PVC %s/%s", kubeletKeyName, pvc.Namespace, pvc.Name)
+		framework.ExpectNoError(waitErr, "Unable to find metric %s for PVC %s/%s", kubeletKeyName, pvcNamespace, pvcName)
		for _, key := range volumeStatKeys {
			kubeletKeyName := fmt.Sprintf("%s_%s", kubeletmetrics.KubeletSubsystem, key)
-			found := findVolumeStatMetric(kubeletKeyName, pvc.Namespace, pvc.Name, kubeMetrics)
-			framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvc.Name, pvc.Namespace, kubeletKeyName)
+			found := findVolumeStatMetric(kubeletKeyName, pvcNamespace, pvcName, kubeMetrics)
+			framework.ExpectEqual(found, true, "PVC %s, Namespace %s not found for %s", pvcName, pvcNamespace, kubeletKeyName)
		}
		framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
	}
-	blockmode := func(ephemeral bool) {
-		if !ephemeral {
+	blockmode := func(isEphemeral bool) {
+		if !isEphemeral {
			pvcBlock, err = c.CoreV1().PersistentVolumeClaims(pvcBlock.Namespace).Create(context.TODO(), pvcBlock, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			framework.ExpectNotEqual(pvcBlock, nil)
		}
-		pod := makePod(ns, pvcBlock, ephemeral)
+		pod := makePod(ns, pvcBlock, isEphemeral)
		pod.Spec.Containers[0].VolumeDevices = []v1.VolumeDevice{{
			Name: pod.Spec.Volumes[0].Name,
			DevicePath: "/mnt/" + pvcBlock.Name,
@@ -326,14 +333,14 @@
		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
	}
-	totalTime := func(ephemeral bool) {
-		if !ephemeral {
+	totalTime := func(isEphemeral bool) {
+		if !isEphemeral {
			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			framework.ExpectNotEqual(pvc, nil)
		}
-		pod := makePod(ns, pvc, ephemeral)
+		pod := makePod(ns, pvc, isEphemeral)
		pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
@@ -357,14 +364,14 @@
		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
	}
-	volumeManager := func(ephemeral bool) {
-		if !ephemeral {
+	volumeManager := func(isEphemeral bool) {
+		if !isEphemeral {
			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			framework.ExpectNotEqual(pvc, nil)
		}
-		pod := makePod(ns, pvc, ephemeral)
+		pod := makePod(ns, pvc, isEphemeral)
		pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
		framework.ExpectNoError(err)
@@ -387,14 +394,14 @@
		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
	}
-	adController := func(ephemeral bool) {
-		if !ephemeral {
+	adController := func(isEphemeral bool) {
+		if !isEphemeral {
			pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
			framework.ExpectNoError(err)
			framework.ExpectNotEqual(pvc, nil)
		}
-		pod := makePod(ns, pvc, ephemeral)
+		pod := makePod(ns, pvc, isEphemeral)
		// Get metrics
		controllerMetrics, err := metricsGrabber.GrabFromControllerManager()
@@ -447,27 +454,27 @@
		framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
	}
-	testAll := func(ephemeral bool) {
+	testAll := func(isEphemeral bool) {
		ginkgo.It("should create prometheus metrics for volume provisioning and attach/detach", func() {
-			provisioning(ephemeral)
+			provisioning(isEphemeral)
		})
		ginkgo.It("should create prometheus metrics for volume provisioning errors [Slow]", func() {
-			provisioningError(ephemeral)
+			provisioningError(isEphemeral)
		})
		ginkgo.It("should create volume metrics with the correct FilesystemMode PVC ref", func() {
-			filesystemMode(ephemeral)
+			filesystemMode(isEphemeral)
		})
		ginkgo.It("should create volume metrics with the correct BlockMode PVC ref", func() {
-			blockmode(ephemeral)
+			blockmode(isEphemeral)
		})
		ginkgo.It("should create metrics for total time taken in volume operations in P/V Controller", func() {
-			totalTime(ephemeral)
+			totalTime(isEphemeral)
		})
		ginkgo.It("should create volume metrics in Volume Manager", func() {
-			volumeManager(ephemeral)
+			volumeManager(isEphemeral)
		})
		ginkgo.It("should create metrics for total number of volumes in A/D Controller", func() {
-			adController(ephemeral)
+			adController(isEphemeral)
		})
	}
@@ -873,10 +880,10 @@ func waitForADControllerStatesMetrics(metricsGrabber *e2emetrics.Grabber, metric
// makePod creates a pod which either references the PVC or creates it via a
// generic ephemeral volume claim template.
-func makePod(ns string, pvc *v1.PersistentVolumeClaim, ephemeral bool) *v1.Pod {
+func makePod(ns string, pvc *v1.PersistentVolumeClaim, isEphemeral bool) *v1.Pod {
	claims := []*v1.PersistentVolumeClaim{pvc}
	pod := e2epod.MakePod(ns, nil, claims, false, "")
-	if ephemeral {
+	if isEphemeral {
		volSrc := pod.Spec.Volumes[0]
		volSrc.PersistentVolumeClaim = nil
		volSrc.Ephemeral = &v1.EphemeralVolumeSource{
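
The hunk above ends mid-assignment. As a hedged sketch of the conversion makePod performs when isEphemeral is true, assuming the remainder simply reuses the PVC's spec as the inline claim template; the lines after the cut are a reconstruction, not the repository's exact code:

// Sketch: turn the PVC-backed pod volume into a generic ephemeral volume by
// moving the claim's spec into an inline VolumeClaimTemplate, then write the
// modified volume back into the pod spec.
volSrc := pod.Spec.Volumes[0]
volSrc.PersistentVolumeClaim = nil
volSrc.Ephemeral = &v1.EphemeralVolumeSource{
	VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
		Spec: pvc.Spec,
	},
}
pod.Spec.Volumes[0] = volSrc

The rename from ephemeral to isEphemeral throughout this file also avoids shadowing the newly imported k8s.io/component-helpers/storage/ephemeral package.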