e2e: use Ginkgo context

All code must use the context provided by Ginkgo when making API calls or
polling for a change; otherwise the code does not return promptly when the
test gets aborted.
Author: Patrick Ohly
Date:   2022-12-12 10:11:10 +01:00
Parent: bf1d1dfd0f
Commit: 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions

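The change itself is mechanical: spec bodies and helpers gain a ctx parameter, and context.TODO() calls are replaced with that ctx. Below is a minimal sketch of the resulting pattern, assuming Ginkgo v2 and client-go; the spec, client, namespace, and pod name are illustrative placeholders, not code from this commit.

// Minimal sketch, not code from this commit: illustrates passing the
// Ginkgo-provided context into API calls and polling helpers.
// The client, namespace, and pod name are hypothetical placeholders.
package example

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func defineSpecs(client clientset.Interface, namespace string) {
	// Old style: func() bodies used context.TODO(), so API calls and polls
	// kept running even after the spec had been aborted.
	// New style: Ginkgo passes a per-spec context and cancels it when the
	// test is interrupted or times out, so everything below returns promptly.
	ginkgo.It("uses the Ginkgo context for API calls and polling", func(ctx context.Context) {
		// API call bounded by the spec's context.
		_, err := client.CoreV1().Pods(namespace).Get(ctx, "example-pod", metav1.GetOptions{})
		framework.ExpectNoError(err)

		// Polling bounded by the same context: an aborted test stops the loop.
		err = wait.PollImmediateUntilWithContext(ctx, 2*time.Second, func(ctx context.Context) (bool, error) {
			pod, err := client.CoreV1().Pods(namespace).Get(ctx, "example-pod", metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			return pod.Status.Phase == v1.PodRunning, nil
		})
		framework.ExpectNoError(err)
	})
}

Because Ginkgo cancels ctx when a spec is interrupted or times out, both the Get call and the poll loop return promptly instead of blocking cleanup.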

@@ -62,7 +62,7 @@ type StorageClassTest struct {
DelayBinding bool
ClaimSize string
ExpectedSize string
- PvCheck func(claim *v1.PersistentVolumeClaim)
+ PvCheck func(ctx context.Context, claim *v1.PersistentVolumeClaim)
VolumeMode v1.PersistentVolumeMode
AllowVolumeExpansion bool
NodeSelection e2epod.NodeSelection
@@ -136,12 +136,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageframework.GetDriverTimeouts(driver))
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
- init := func() {
+ init := func(ctx context.Context) {
l = local{}
dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
// Now do the more expensive test initialization.
- l.config = driver.PrepareTest(f)
- l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
+ l.config = driver.PrepareTest(ctx, f)
+ l.migrationCheck = newMigrationOpCheck(ctx, f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
ginkgo.DeferCleanup(l.migrationCheck.validateMigrationVolumeOpCounts)
l.cs = l.config.Framework.ClientSet
testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange
@@ -149,7 +149,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
- l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType)
+ l.sc = dDriver.GetDynamicProvisionStorageClass(ctx, l.config, pattern.FsType)
if l.sc == nil {
e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
}
@@ -185,10 +185,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
e2eskipper.Skipf("Block volumes do not support mount options - skipping")
}
- init()
+ init(ctx)
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
- l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+ l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
}
SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
@@ -209,7 +209,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
}
- init()
+ init(ctx)
dc := l.config.Framework.DynamicClient
testConfig := storageframework.ConvertTestConfig(l.config)
@@ -217,7 +217,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
l.pvc.Spec.DataSourceRef = dataSourceRef
- l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+ l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data")
tests := []e2evolume.Test{
{
@@ -227,7 +227,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
ExpectedContent: expectedContent,
},
}
- e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
+ e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
}
l.testCase.TestDynamicProvisioning(ctx)
})
@@ -240,10 +240,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
e2eskipper.Skipf("Test for Block volumes is not implemented - skipping")
}
- init()
+ init(ctx)
ginkgo.By("Creating validator namespace")
- valNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{
+ valNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{
"e2e-framework": f.BaseName,
"e2e-test-namespace": f.Namespace.Name,
})
@@ -256,14 +256,14 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/rbac-data-source-validator.yaml",
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/volume-data-source-validator/setup-data-source-validator.yaml",
}
- err = storageutils.CreateFromManifests(f, valNamespace,
+ err = storageutils.CreateFromManifests(ctx, f, valNamespace,
func(item interface{}) error { return nil },
valManifests...)
framework.ExpectNoError(err)
ginkgo.By("Creating populator namespace")
- popNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{
+ popNamespace, err := f.CreateNamespace(ctx, fmt.Sprintf("%s-pop", f.Namespace.Name), map[string]string{
"e2e-framework": f.BaseName,
"e2e-test-namespace": f.Namespace.Name,
})
@@ -275,7 +275,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/crd/hello-populator-crd.yaml",
"test/e2e/testing-manifests/storage-csi/any-volume-datasource/hello-populator-deploy.yaml",
}
- err = storageutils.CreateFromManifests(f, popNamespace,
+ err = storageutils.CreateFromManifests(ctx, f, popNamespace,
func(item interface{}) error {
switch item := item.(type) {
case *appsv1.Deployment:
@@ -334,12 +334,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
},
}
- _, err = dc.Resource(volumePopulatorGVR).Create(context.TODO(), helloPopulatorCR, metav1.CreateOptions{})
+ _, err = dc.Resource(volumePopulatorGVR).Create(ctx, helloPopulatorCR, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer func() {
framework.Logf("deleting VolumePopulator CR datasource %q/%q", helloPopulatorCR.GetNamespace(), helloPopulatorCR.GetName())
- err = dc.Resource(volumePopulatorGVR).Delete(context.TODO(), helloPopulatorCR.GetName(), metav1.DeleteOptions{})
+ err = dc.Resource(volumePopulatorGVR).Delete(ctx, helloPopulatorCR.GetName(), metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting VolumePopulator CR datasource %q. Error: %v", helloPopulatorCR.GetName(), err)
}
@@ -366,12 +366,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
},
}
- _, err = dc.Resource(helloGVR).Namespace(f.Namespace.Name).Create(context.TODO(), helloCR, metav1.CreateOptions{})
+ _, err = dc.Resource(helloGVR).Namespace(f.Namespace.Name).Create(ctx, helloCR, metav1.CreateOptions{})
framework.ExpectNoError(err)
defer func() {
framework.Logf("deleting Hello CR datasource %q/%q", helloCR.GetNamespace(), helloCR.GetName())
- err = dc.Resource(helloGVR).Namespace(helloCR.GetNamespace()).Delete(context.TODO(), helloCR.GetName(), metav1.DeleteOptions{})
+ err = dc.Resource(helloGVR).Namespace(helloCR.GetNamespace()).Delete(ctx, helloCR.GetName(), metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting Hello CR datasource %q. Error: %v", helloCR.GetName(), err)
}
@@ -386,7 +386,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
testConfig := storageframework.ConvertTestConfig(l.config)
l.testCase.NodeSelection = testConfig.ClientNodeSelection
- l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+ l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data")
tests := []e2evolume.Test{
{
@@ -396,7 +396,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
ExpectedContent: expectedContent,
},
}
- e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
+ e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
}
SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
@@ -408,12 +408,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
}
- init()
+ init(ctx)
if l.config.ClientNodeSelection.Name == "" {
// Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
// drivers don't support cloning across them.
- if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
+ if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
framework.Failf("Error setting topology requirements: %v", err)
}
}
@@ -422,7 +422,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
l.pvc.Spec.DataSourceRef = dataSourceRef
l.testCase.NodeSelection = testConfig.ClientNodeSelection
- l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+ l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
ginkgo.By("checking whether the created volume has the pre-populated data")
tests := []e2evolume.Test{
{
@@ -432,11 +432,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
ExpectedContent: expectedContent,
},
}
- e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
+ e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
}
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
- volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
- e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
+ volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
+ e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
l.testCase.TestDynamicProvisioning(ctx)
})
@@ -449,12 +449,12 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name)
}
- init()
+ init(ctx)
if l.config.ClientNodeSelection.Name == "" {
// Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
// drivers don't support cloning across them.
- if err := ensureTopologyRequirements(&l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
+ if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
framework.Failf("Error setting topology requirements: %v", err)
}
}
@@ -476,7 +476,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
t := *l.testCase
t.NodeSelection = testConfig.ClientNodeSelection
- t.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+ t.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
tests := []e2evolume.Test{
{
@@ -486,11 +486,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
ExpectedContent: expectedContent,
},
}
- e2evolume.TestVolumeClientSlow(f, myTestConfig, nil, "", tests)
+ e2evolume.TestVolumeClientSlow(ctx, f, myTestConfig, nil, "", tests)
}
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
- volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
- e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
+ volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
+ e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
t.TestDynamicProvisioning(ctx)
}(i)
}
@@ -509,9 +509,9 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
e2eskipper.Skipf("this driver does not support multiple PVs with the same volumeHandle")
}
- init()
+ init(ctx)
- l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
+ l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
MultiplePVMountSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
}
SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
@@ -554,7 +554,7 @@ func SetupStorageClass(
}
} else {
// StorageClass is nil, so the default one will be used
- scName, err := e2epv.GetDefaultStorageClassName(client)
+ scName, err := e2epv.GetDefaultStorageClassName(ctx, client)
framework.ExpectNoError(err)
ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName)
computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
@@ -604,15 +604,15 @@ func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.Persi
}
var pod *v1.Pod
- pod, err := e2epod.CreateSecPod(client, podConfig, t.Timeouts.DataSourceProvision)
+ pod, err := e2epod.CreateSecPod(ctx, client, podConfig, t.Timeouts.DataSourceProvision)
// Delete pod now, otherwise PV can't be deleted below
framework.ExpectNoError(err)
- e2epod.DeletePodOrFail(client, pod.Namespace, pod.Name)
+ e2epod.DeletePodOrFail(ctx, client, pod.Namespace, pod.Name)
}
// Run the checker
if t.PvCheck != nil {
- t.PvCheck(claim)
+ t.PvCheck(ctx, claim)
}
pv := t.checkProvisioning(ctx, client, claim, class)
@@ -629,7 +629,7 @@ func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.Persi
// t.Timeouts.PVDeleteSlow) to recover from random cloud hiccups.
if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
ginkgo.By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
- framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, t.Timeouts.PVDeleteSlow))
+ framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, client, pv.Name, 5*time.Second, t.Timeouts.PVDeleteSlow))
}
return pv
@@ -650,7 +650,7 @@ func getBoundPV(ctx context.Context, client clientset.Interface, pvc *v1.Persist
// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
- err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
+ err := e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")
@@ -721,7 +721,7 @@ func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface,
// pod might be nil now.
StopPod(ctx, client, pod)
})
- framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
+ framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@@ -778,7 +778,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
- framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
+ framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@@ -791,7 +791,7 @@ func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts
ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
command = "grep 'hello world' /mnt/test/data"
pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
- framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
+ framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node")
@@ -817,7 +817,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
defer func() {
errors := map[string]error{}
for _, claim := range createdClaims {
- err := e2epv.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace)
+ err := e2epv.DeletePersistentVolumeClaim(ctx, t.Client, claim.Name, claim.Namespace)
if err != nil {
errors[claim.Name] = err
}
@@ -831,7 +831,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
ginkgo.By("checking the claims are in pending state")
- err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
+ err = e2epv.WaitForPersistentVolumeClaimsPhase(ctx, v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
framework.ExpectError(err)
verifyPVCsPending(ctx, t.Client, createdClaims)
@@ -839,14 +839,14 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
// Create a pod referring to the claim and wait for it to get to running
var pod *v1.Pod
if expectUnschedulable {
- pod, err = e2epod.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
+ pod, err = e2epod.CreateUnschedulablePod(ctx, t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */)
} else {
- pod, err = e2epod.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
+ pod, err = e2epod.CreatePod(ctx, t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
}
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) error {
- e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
- return e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
+ e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name)
+ return e2epod.WaitForPodToDisappear(ctx, t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
})
if expectUnschedulable {
// Verify that no claims are provisioned.
@@ -865,7 +865,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
// make sure claim did bind
- err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
+ err = e2epv.WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
pv, err := t.Client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
@@ -881,7 +881,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
pod := StartInPodWithVolume(ctx, c, ns, claimName, podName, command, node)
defer StopPod(ctx, c, pod)
- framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
+ framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, pod.Namespace, t.PodStartSlow))
// get the latest status of the pod
pod, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
@@ -954,7 +954,7 @@ func StopPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) {
} else {
framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
}
- e2epod.DeletePodWithWait(c, pod)
+ framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, pod))
}
// StopPodAndDependents first tries to log the output of the pod's container,
@@ -1008,13 +1008,13 @@ func StopPodAndDependents(ctx context.Context, c clientset.Interface, timeouts *
framework.Logf("pod Delete API error: %v", err)
}
framework.Logf("Wait up to %v for pod %q to be fully deleted", timeouts.PodDelete, pod.Name)
- e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, timeouts.PodDelete)
+ framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, c, pod.Name, pod.Namespace, timeouts.PodDelete))
if len(podPVs) > 0 {
for _, pv := range podPVs {
// As with CSI inline volumes, we use the pod delete timeout here because conceptually
// the volume deletion needs to be that fast (whatever "that" is).
framework.Logf("Wait up to %v for pod PV %s to be fully deleted", timeouts.PodDelete, pv.Name)
- e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PodDelete)
+ framework.ExpectNoError(e2epv.WaitForPersistentVolumeDeleted(ctx, c, pv.Name, 5*time.Second, timeouts.PodDelete))
}
}
}
@@ -1065,10 +1065,10 @@ func prepareSnapshotDataSourceForProvisioning(
ExpectedContent: injectContent,
},
}
- e2evolume.InjectContent(f, config, nil, "", tests)
+ e2evolume.InjectContent(ctx, f, config, nil, "", tests)
parameters := map[string]string{}
- snapshotResource := storageframework.CreateSnapshotResource(sDriver, perTestConfig, pattern, initClaim.GetName(), initClaim.GetNamespace(), f.Timeouts, parameters)
+ snapshotResource := storageframework.CreateSnapshotResource(ctx, sDriver, perTestConfig, pattern, initClaim.GetName(), initClaim.GetNamespace(), f.Timeouts, parameters)
group := "snapshot.storage.k8s.io"
dataSourceRef := &v1.TypedObjectReference{
APIGroup: &group,
@@ -1083,7 +1083,7 @@ func prepareSnapshotDataSourceForProvisioning(
framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err)
}
- err = snapshotResource.CleanupResource(f.Timeouts)
+ err = snapshotResource.CleanupResource(ctx, f.Timeouts)
framework.ExpectNoError(err)
}
ginkgo.DeferCleanup(cleanupFunc)
@@ -1120,7 +1120,7 @@ func preparePVCDataSourceForProvisioning(
ExpectedContent: injectContent,
},
}
- e2evolume.InjectContent(f, config, nil, "", tests)
+ e2evolume.InjectContent(ctx, f, config, nil, "", tests)
dataSourceRef := &v1.TypedObjectReference{
Kind: "PersistentVolumeClaim",
@@ -1151,11 +1151,11 @@ func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interf
NodeSelection: node,
PVCs: []*v1.PersistentVolumeClaim{claim},
}
- pod1, err := e2epod.CreateSecPodWithNodeSelection(client, &pod1Config, timeouts.PodStart)
+ pod1, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod1Config, timeouts.PodStart)
framework.ExpectNoError(err)
defer func() {
ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod1.Namespace, pod1.Name))
- framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod1))
+ framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod1))
}()
ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName))
@@ -1178,7 +1178,7 @@ func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interf
VolumeMode: e2evolume.Spec.VolumeMode,
}
- pv2, pvc2, err := e2epv.CreatePVCPV(client, timeouts, pv2Config, pvc2Config, claim.Namespace, true)
+ pv2, pvc2, err := e2epv.CreatePVCPV(ctx, client, timeouts, pv2Config, pvc2Config, claim.Namespace, true)
framework.ExpectNoError(err, "PVC, PV creation failed")
framework.Logf("Created PVC %s/%s and PV %s", pvc2.Namespace, pvc2.Name, pv2.Name)
@@ -1187,16 +1187,16 @@ func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interf
NodeSelection: e2epod.NodeSelection{Name: pod1.Spec.NodeName},
PVCs: []*v1.PersistentVolumeClaim{pvc2},
}
- pod2, err := e2epod.CreateSecPodWithNodeSelection(client, &pod2Config, timeouts.PodStart)
+ pod2, err := e2epod.CreateSecPodWithNodeSelection(ctx, client, &pod2Config, timeouts.PodStart)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod2.Namespace, pod2.Name, pod2.Spec.NodeName))
ginkgo.By(fmt.Sprintf("Deleting Pod %s/%s", pod2.Namespace, pod2.Name))
- framework.ExpectNoError(e2epod.DeletePodWithWait(client, pod2))
+ framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, client, pod2))
- err = e2epv.DeletePersistentVolumeClaim(client, pvc2.Name, pvc2.Namespace)
+ err = e2epv.DeletePersistentVolumeClaim(ctx, client, pvc2.Name, pvc2.Namespace)
framework.ExpectNoError(err, "Failed to delete PVC: %s/%s", pvc2.Namespace, pvc2.Name)
- err = e2epv.DeletePersistentVolume(client, pv2.Name)
+ err = e2epv.DeletePersistentVolume(ctx, client, pv2.Name)
framework.ExpectNoError(err, "Failed to delete PV: %s", pv2.Name)
}