e2e storage: eliminate context.TODO and cleanup callbacks

Tests should accept a context from Ginkgo and pass it through to all
functions that may block for a longer period of time. In particular, all
Kubernetes API calls made through client-go should use that context. If a
timeout then occurs, the test returns promptly because everything it could
block on returns as soon as the context is canceled.
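
A minimal sketch of that pattern (the pod name and the particular API call
are only illustrative; f is the usual *framework.Framework instance, and
the imports match the patched files):

    ginkgo.It("gets a pod with the spec's context", func(ctx context.Context) {
        // client-go aborts the call once ctx is canceled, e.g. when the
        // spec times out, so the test does not keep hanging in the API call.
        pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, "test-pod", metav1.GetOptions{})
        framework.ExpectNoError(err, "get pod")
        framework.Logf("pod %s runs on node %s", pod.Name, pod.Spec.NodeName)
    })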

Cleanup code then needs to run in a separate Ginkgo node, typically one
registered via DeferCleanup, which ensures that it gets its own context
that has not timed out yet.
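
The callback registered with DeferCleanup may itself take a
context.Context, in which case Ginkgo hands it a context belonging to the
cleanup node; the updated SetupStorageClass below uses exactly that shape:

    ginkgo.DeferCleanup(func(ctx context.Context) {
        framework.Logf("deleting storage class %s", computedStorageClass.Name)
        // The cleanup node's context is still alive even if the spec's
        // context has already timed out, so the delete call can complete.
        err := client.StorageV1().StorageClasses().Delete(ctx, computedStorageClass.Name, metav1.DeleteOptions{})
        if err != nil && !apierrors.IsNotFound(err) {
            framework.ExpectNoError(err, "delete storage class")
        }
    })
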
Author: Patrick Ohly
Date:   2022-10-07 18:56:23 +02:00
Commit: f15d7f6cca (parent d8d3dc9476)

11 changed files with 213 additions and 246 deletions


@@ -141,6 +141,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// Now do the more expensive test initialization.
l.config = driver.PrepareTest(f)
l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
ginkgo.DeferCleanup(l.migrationCheck.validateMigrationVolumeOpCounts)
l.cs = l.config.Framework.ClientSet
testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
@@ -175,11 +176,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
}
cleanup := func() {
l.migrationCheck.validateMigrationVolumeOpCounts()
}
ginkgo.It("should provision storage with mount options", func() {
ginkgo.It("should provision storage with mount options", func(ctx context.Context) {
if dInfo.SupportedMountOption == nil {
e2eskipper.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
}
@@ -188,19 +185,17 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
init()
defer cleanup()
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
}
_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class)
defer clearProvisionedStorageClass()
SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
l.testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning(ctx)
})
ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func(ctx context.Context) {
if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
e2eskipper.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
}
@@ -214,13 +209,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
init()
defer cleanup()
dc := l.config.Framework.DynamicClient
testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
defer cleanupFunc()
dataSource := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
@@ -235,10 +228,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
}
l.testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning(ctx)
})
ginkgo.It("should provision storage with any volume data source [Serial]", func() {
ginkgo.It("should provision storage with any volume data source [Serial]", func(ctx context.Context) {
if len(dInfo.InTreePluginName) != 0 {
e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping")
}
@@ -247,7 +240,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
init()
defer cleanup()
ginkgo.By("Creating validator namespace")
valNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{
@@ -414,18 +406,16 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
}
_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class)
defer clearProvisionedStorageClass()
SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
l.testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning(ctx)
})
ginkgo.It("should provision storage with pvc data source", func() {
ginkgo.It("should provision storage with pvc data source", func(ctx context.Context) {
if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
}
init()
defer cleanup()
if l.config.ClientNodeSelection.Name == "" {
// Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
@@ -436,8 +426,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
defer dataSourceCleanup()
dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
l.pvc.Spec.DataSource = dataSource
l.testCase.NodeSelection = testConfig.ClientNodeSelection
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
@@ -455,10 +444,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace)
e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
l.testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning(ctx)
})
ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func() {
ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func(ctx context.Context) {
// Test cloning a single volume multiple times.
if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
@@ -468,7 +457,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
init()
defer cleanup()
if l.config.ClientNodeSelection.Name == "" {
// Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
@@ -479,8 +467,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
defer dataSourceCleanup()
dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
l.pvc.Spec.DataSource = dataSource
var wg sync.WaitGroup
@@ -511,13 +498,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace)
e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
t.TestDynamicProvisioning()
t.TestDynamicProvisioning(ctx)
}(i)
}
wg.Wait()
})
ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func() {
ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func(ctx context.Context) {
// csi-hostpath driver does not support this test case. In this test case, we have 2 PV containing the same underlying storage.
// during the NodeStage call for the second volume, csi-hostpath fails the call, because it thinks the volume is already staged at a different path.
// Note: This is not an issue with driver like PD CSI where the NodeStage is a no-op for block mode.
@@ -526,15 +513,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
}
init()
defer cleanup()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
MultiplePVMountSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
MultiplePVMountSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
}
_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class)
defer clearProvisionedStorageClass()
SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
l.testCase.TestDynamicProvisioning()
l.testCase.TestDynamicProvisioning(ctx)
})
}
@@ -542,49 +527,50 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// then it's returned as it is, if it doesn't exist then it's created first
// and then returned, if the spec is nil then we return the `default` StorageClass
func SetupStorageClass(
ctx context.Context,
client clientset.Interface,
class *storagev1.StorageClass,
) (*storagev1.StorageClass, func()) {
) *storagev1.StorageClass {
gomega.Expect(client).NotTo(gomega.BeNil(), "SetupStorageClass.client is required")
var err error
var computedStorageClass *storagev1.StorageClass
var clearComputedStorageClass = func() {}
if class != nil {
computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{})
computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
if err == nil {
// skip storageclass creation if it already exists
ginkgo.By("Storage class " + computedStorageClass.Name + " is already created, skipping creation.")
} else {
ginkgo.By("Creating a StorageClass")
class, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
class, err = client.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{})
framework.ExpectNoError(err)
computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{})
computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
clearComputedStorageClass = func() {
clearComputedStorageClass := func(ctx context.Context) {
framework.Logf("deleting storage class %s", computedStorageClass.Name)
err := client.StorageV1().StorageClasses().Delete(context.TODO(), computedStorageClass.Name, metav1.DeleteOptions{})
err := client.StorageV1().StorageClasses().Delete(ctx, computedStorageClass.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "delete storage class")
}
}
ginkgo.DeferCleanup(clearComputedStorageClass)
}
} else {
// StorageClass is nil, so the default one will be used
scName, err := e2epv.GetDefaultStorageClassName(client)
framework.ExpectNoError(err)
ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName)
computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), scName, metav1.GetOptions{})
computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
framework.ExpectNoError(err)
}
return computedStorageClass, clearComputedStorageClass
return computedStorageClass
}
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
// it's assumed that the StorageClass `t.Class` is already provisioned,
// see #ProvisionStorageClass
func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.PersistentVolume {
var err error
client := t.Client
gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required")
@@ -593,16 +579,16 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
gomega.Expect(claim.GenerateName).NotTo(gomega.BeEmpty(), "StorageClassTest.Claim.GenerateName must not be empty")
class := t.Class
gomega.Expect(class).NotTo(gomega.BeNil(), "StorageClassTest.Class is required")
class, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{})
class, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "StorageClass.Class "+class.Name+" couldn't be fetched from the cluster")
ginkgo.By(fmt.Sprintf("creating claim=%+v", claim))
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{})
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
// typically this claim has already been deleted
err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{})
err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
}
@@ -632,10 +618,10 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
t.PvCheck(claim)
}
pv := t.checkProvisioning(client, claim, class)
pv := t.checkProvisioning(ctx, client, claim, class)
ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}))
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
// Retain, there's no use waiting because the PV won't be auto-deleted and
@@ -653,25 +639,25 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
}
// getBoundPV returns a PV details.
func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
func getBoundPV(ctx context.Context, client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
return pv, err
}
// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")
pv, err := getBoundPV(client, claim)
pv, err := getBoundPV(ctx, client, claim)
framework.ExpectNoError(err)
// Check sizes
@@ -730,23 +716,23 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
// persistent across pods.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
defer func() {
// pod might be nil now.
StopPod(client, pod)
StopPod(ctx, client, pod)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
StopPod(client, pod)
StopPod(ctx, client, pod)
pod = nil // Don't stop twice.
// Get a new copy of the PV
e2evolume, err := getBoundPV(client, claim)
e2evolume, err := getBoundPV(ctx, client, claim)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
@@ -764,7 +750,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.
// agnhost doesn't support mount
command = "grep 'hello world' /mnt/test/data"
}
RunInPodWithVolume(client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
RunInPodWithVolume(ctx, client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
return e2evolume
}
@@ -783,23 +769,23 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.
// persistent across pods and across nodes.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node")
var pod *v1.Pod
defer func() {
// passing pod = nil is okay.
StopPod(client, pod)
StopPod(ctx, client, pod)
}()
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
StopPod(client, pod)
StopPod(ctx, client, pod)
pod = nil // Don't stop twice.
// Add node-anti-affinity.
@@ -807,17 +793,17 @@ func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutCon
e2epod.SetAntiAffinity(&secondNode, actualNodeName)
ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
command = "grep 'hello world' /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node")
StopPod(client, pod)
StopPod(ctx, client, pod)
pod = nil
}
// TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Context, claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
var err error
framework.ExpectNotEqual(len(claims), 0)
namespace := claims[0].Namespace
@@ -826,7 +812,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims {
c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{})
c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
claimNames = append(claimNames, c.Name)
createdClaims = append(createdClaims, c)
framework.ExpectNoError(err)
@@ -850,7 +836,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
ginkgo.By("checking the claims are in pending state")
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
framework.ExpectError(err)
verifyPVCsPending(t.Client, createdClaims)
verifyPVCsPending(ctx, t.Client, createdClaims)
ginkgo.By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running
@@ -867,25 +853,25 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
}()
if expectUnschedulable {
// Verify that no claims are provisioned.
verifyPVCsPending(t.Client, createdClaims)
verifyPVCsPending(ctx, t.Client, createdClaims)
return nil, nil
}
// collect node details
node, err := t.Client.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{})
node, err := t.Client.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("re-checking the claims to see they bound")
var pvs []*v1.PersistentVolume
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
// make sure claim did bind
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := t.Client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err)
pvs = append(pvs, pv)
}
@@ -895,20 +881,20 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
// It starts, checks, collects output and stops it.
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
defer StopPod(c, pod)
func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
pod := StartInPodWithVolume(ctx, c, ns, claimName, podName, command, node)
defer StopPod(ctx, c, pod)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
// get the latest status of the pod
pod, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
pod, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return pod
}
// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
return StartInPodWithVolumeSource(c, v1.VolumeSource{
func StartInPodWithVolume(ctx context.Context, c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
return StartInPodWithVolumeSource(ctx, c, v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
},
@@ -917,7 +903,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
// StartInPodWithVolumeSource starts a command in a pod with given volume mounted to /mnt directory
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolumeSource(c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod {
func StartInPodWithVolumeSource(ctx context.Context, c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -954,18 +940,18 @@ func StartInPodWithVolumeSource(c clientset.Interface, volSrc v1.VolumeSource, n
}
e2epod.SetNodeSelection(&pod.Spec, node)
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err)
return pod
}
// StopPod first tries to log the output of the pod's container, then deletes the pod and
// waits for that to succeed.
func StopPod(c clientset.Interface, pod *v1.Pod) {
func StopPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) {
if pod == nil {
return
}
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
} else {
@@ -977,11 +963,11 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
// StopPodAndDependents first tries to log the output of the pod's container,
// then deletes the pod and waits for that to succeed. Also waits for all owned
// resources to be deleted.
func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
func StopPodAndDependents(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
if pod == nil {
return
}
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
} else {
@@ -990,7 +976,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
// We must wait explicitly for removal of the generic ephemeral volume PVs.
// For that we must find them first...
pvs, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{})
pvs, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "list PVs")
var podPVs []v1.PersistentVolume
for _, pv := range pvs.Items {
@@ -998,7 +984,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
pv.Spec.ClaimRef.Namespace != pod.Namespace {
continue
}
pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{})
pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pv.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
// Must have been some unrelated PV, otherwise the PVC should exist.
continue
@@ -1011,7 +997,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
deletionPolicy := metav1.DeletePropagationForeground
err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name,
err = c.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name,
metav1.DeleteOptions{
// If the pod is the owner of some resources (like ephemeral inline volumes),
// then we want to be sure that those are also gone before we return.
@@ -1036,16 +1022,17 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
}
}
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
func verifyPVCsPending(ctx context.Context, client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
for _, claim := range pvcs {
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending)
}
}
func prepareSnapshotDataSourceForProvisioning(
ctx context.Context,
f *framework.Framework,
config e2evolume.TestConfig,
perTestConfig *storageframework.PerTestConfig,
@@ -1057,14 +1044,14 @@ func prepareSnapshotDataSourceForProvisioning(
sDriver storageframework.SnapshottableTestDriver,
mode v1.PersistentVolumeMode,
injectContent string,
) (*v1.TypedLocalObjectReference, func()) {
_, clearComputedStorageClass := SetupStorageClass(client, class)
) *v1.TypedLocalObjectReference {
SetupStorageClass(ctx, client, class)
if initClaim.ResourceVersion != "" {
ginkgo.By("Skipping creation of PVC, it already exists")
} else {
ginkgo.By("[Initialize dataSource]creating a initClaim")
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim, metav1.CreateOptions{})
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(ctx, initClaim, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
err = nil
}
@@ -1092,24 +1079,23 @@ func prepareSnapshotDataSourceForProvisioning(
Name: snapshotResource.Vs.GetName(),
}
cleanupFunc := func() {
cleanupFunc := func(ctx context.Context) {
framework.Logf("deleting initClaim %q/%q", initClaim.Namespace, initClaim.Name)
err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(context.TODO(), initClaim.Name, metav1.DeleteOptions{})
err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(ctx, initClaim.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err)
}
err = snapshotResource.CleanupResource(f.Timeouts)
framework.ExpectNoError(err)
clearComputedStorageClass()
}
ginkgo.DeferCleanup(cleanupFunc)
return dataSourceRef, cleanupFunc
return dataSourceRef
}
func preparePVCDataSourceForProvisioning(
ctx context.Context,
f *framework.Framework,
config e2evolume.TestConfig,
client clientset.Interface,
@@ -1117,15 +1103,15 @@ func preparePVCDataSourceForProvisioning(
class *storagev1.StorageClass,
mode v1.PersistentVolumeMode,
injectContent string,
) (*v1.TypedLocalObjectReference, func()) {
_, clearComputedStorageClass := SetupStorageClass(client, class)
) *v1.TypedLocalObjectReference {
SetupStorageClass(ctx, client, class)
if source.ResourceVersion != "" {
ginkgo.By("Skipping creation of PVC, it already exists")
} else {
ginkgo.By("[Initialize dataSource]creating a source PVC")
var err error
source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{})
source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(ctx, source, metav1.CreateOptions{})
framework.ExpectNoError(err)
}
@@ -1144,17 +1130,16 @@ func preparePVCDataSourceForProvisioning(
Name: source.GetName(),
}
cleanupFunc := func() {
cleanupFunc := func(ctx context.Context) {
framework.Logf("deleting source PVC %q/%q", source.Namespace, source.Name)
err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(context.TODO(), source.Name, metav1.DeleteOptions{})
err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(ctx, source.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting source PVC %q. Error: %v", source.Name, err)
}
clearComputedStorageClass()
}
ginkgo.DeferCleanup(cleanupFunc)
return dataSourceRef, cleanupFunc
return dataSourceRef
}
// MultiplePVMountSingleNodeCheck checks that multiple PV pointing to the same underlying storage can be mounted simultaneously on a single node.
@@ -1163,7 +1148,7 @@ func preparePVCDataSourceForProvisioning(
// - Start Pod1 using PVC1, PV1 (which points to a underlying volume v) on node N1.
// - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v.
// - Start Pod2 using PVC2, PV2 (which points to a underlying volume v) on node N1.
func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
pod1Config := e2epod.Config{
NS: claim.Namespace,
NodeSelection: node,
@@ -1178,7 +1163,7 @@ func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framew
ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName))
// Create new PV which points to the same underlying storage. Retain policy is used so that deletion of second PVC does not trigger the deletion of its bound PV and underlying storage.
e2evolume, err := getBoundPV(client, claim)
e2evolume, err := getBoundPV(ctx, client, claim)
framework.ExpectNoError(err)
pv2Config := e2epv.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", "pv"),