e2e: use Ginkgo context

All code must use the context from Ginkgo when doing API calls or polling for a
change; otherwise the code does not return immediately when the test gets
aborted.
Patrick Ohly
2022-12-12 10:11:10 +01:00
parent bf1d1dfd0f
commit 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions
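To make the pattern behind this commit concrete, here is a minimal sketch (not taken from the diff itself) of what "use the context from Ginkgo" means in practice: Ginkgo v2 hands each spec a context that it cancels when the spec is aborted or times out, and every API call and poll must receive that context instead of creating its own with context.TODO(). The clientset cs, namespace ns, and pod name "test-pod" are illustrative placeholders, not names from this commit.

package e2e

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// registerSpec is illustrative only; cs and ns stand in for whatever the
// suite's framework normally provides.
func registerSpec(cs clientset.Interface, ns string) {
	// Ginkgo v2 passes a per-spec context into the test body and cancels it
	// when the spec is aborted, interrupted, or times out.
	ginkgo.It("waits for a pod using the Ginkgo context", func(ctx context.Context) {
		// Anti-pattern: context.TODO() would keep this poll running even after
		// the spec has been aborted. Threading ctx through instead makes the
		// Get call and the poll return as soon as ctx is done.
		err := wait.PollImmediateWithContext(ctx, time.Second, 5*time.Minute,
			func(ctx context.Context) (bool, error) {
				pod, err := cs.CoreV1().Pods(ns).Get(ctx, "test-pod", metav1.GetOptions{})
				if err != nil {
					return false, err
				}
				return pod.Status.Phase == v1.PodRunning, nil
			})
		if err != nil {
			ginkgo.Fail(err.Error())
		}
	})
}

The diff below applies exactly this threading: each helper gains a leading ctx context.Context parameter, and every context.TODO() at a call site is replaced by the ctx received from the caller.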


@@ -150,7 +150,7 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev
}, nil
}
-func (n *nfsDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (n *nfsDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := n.externalPluginName
parameters := map[string]string{"mountOptions": "vers=4.1"}
ns := config.Framework.Namespace.Name
@@ -158,25 +158,25 @@ func (n *nfsDriver) GetDynamicProvisionStorageClass(config *storageframework.Per
return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
}
-func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (n *nfsDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
cs := f.ClientSet
ns := f.Namespace
n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)
// TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner
// is not enough. We should create new clusterrole for testing.
-err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name,
+err := e2eauth.BindClusterRole(ctx, cs.RbacV1(), "cluster-admin", ns.Name,
rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"})
framework.ExpectNoError(err)
ginkgo.DeferCleanup(cs.RbacV1().ClusterRoleBindings().Delete, ns.Name+"--"+"cluster-admin", *metav1.NewDeleteOptions(0))
-err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(),
+err = e2eauth.WaitForAuthorizationUpdate(ctx, cs.AuthorizationV1(),
serviceaccount.MakeUsername(ns.Name, "default"),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization: %v", err)
ginkgo.By("creating an external dynamic provisioner pod")
-n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
+n.externalProvisionerPod = utils.StartExternalProvisioner(ctx, cs, ns.Name, n.externalPluginName)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, n.externalProvisionerPod)
return &storageframework.PerTestConfig{
@@ -186,7 +186,7 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes
}
}
-func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (n *nfsDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
@@ -198,7 +198,7 @@ func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType
case storageframework.InlineVolume:
fallthrough
case storageframework.PreprovisionedPV:
-c, serverPod, serverHost := e2evolume.NewNFSServer(cs, ns.Name, []string{})
+c, serverPod, serverHost := e2evolume.NewNFSServer(ctx, cs, ns.Name, []string{})
config.ServerConfig = &c
return &nfsVolume{
serverHost: serverHost,
@@ -213,8 +213,8 @@ func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType
return nil
}
-func (v *nfsVolume) DeleteVolume() {
-cleanUpVolumeServer(v.f, v.serverPod)
+func (v *nfsVolume) DeleteVolume(ctx context.Context) {
+cleanUpVolumeServer(ctx, v.f, v.serverPod)
}
// iSCSI
@@ -307,7 +307,7 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2
return &pvSource, nil
}
-func (i *iSCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (i *iSCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: i,
Prefix: "iscsi",
@@ -315,12 +315,12 @@ func (i *iSCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerT
}
}
-func (i *iSCSIDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (i *iSCSIDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
-c, serverPod, serverIP, iqn := newISCSIServer(cs, ns.Name)
+c, serverPod, serverIP, iqn := newISCSIServer(ctx, cs, ns.Name)
config.ServerConfig = &c
config.ClientNodeSelection = c.ClientNodeSelection
return &iSCSIVolume{
@@ -332,7 +332,7 @@ func (i *iSCSIDriver) CreateVolume(config *storageframework.PerTestConfig, volTy
}
// newISCSIServer is an iSCSI-specific wrapper for CreateStorageServer.
-func newISCSIServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) {
+func newISCSIServer(ctx context.Context, cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, ip, iqn string) {
// Generate cluster-wide unique IQN
iqn = fmt.Sprintf(iSCSIIQNTemplate, namespace)
config = e2evolume.TestConfig{
@@ -351,14 +351,14 @@ func newISCSIServer(cs clientset.Interface, namespace string) (config e2evolume.
ServerReadyMessage: "iscsi target started",
ServerHostNetwork: true,
}
-pod, ip = e2evolume.CreateStorageServer(cs, config)
+pod, ip = e2evolume.CreateStorageServer(ctx, cs, config)
// Make sure the client runs on the same node as server so we don't need to open any firewalls.
config.ClientNodeSelection = e2epod.NodeSelection{Name: pod.Spec.NodeName}
return config, pod, ip, iqn
}
// newRBDServer is a CephRBD-specific wrapper for CreateStorageServer.
-func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
+func newRBDServer(ctx context.Context, cs clientset.Interface, namespace string) (config e2evolume.TestConfig, pod *v1.Pod, secret *v1.Secret, ip string) {
config = e2evolume.TestConfig{
Namespace: namespace,
Prefix: "rbd",
@@ -369,7 +369,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.Te
},
ServerReadyMessage: "Ceph is ready",
}
-pod, ip = e2evolume.CreateStorageServer(cs, config)
+pod, ip = e2evolume.CreateStorageServer(ctx, cs, config)
// create secrets for the server
secret = &v1.Secret{
TypeMeta: metav1.TypeMeta{
@@ -386,7 +386,7 @@ func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.Te
Type: "kubernetes.io/rbd",
}
-secret, err := cs.CoreV1().Secrets(config.Namespace).Create(context.TODO(), secret, metav1.CreateOptions{})
+secret, err := cs.CoreV1().Secrets(config.Namespace).Create(ctx, secret, metav1.CreateOptions{})
if err != nil {
framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
}
@@ -394,8 +394,8 @@ func newRBDServer(cs clientset.Interface, namespace string) (config e2evolume.Te
return config, pod, secret, ip
}
-func (v *iSCSIVolume) DeleteVolume() {
-cleanUpVolumeServer(v.f, v.serverPod)
+func (v *iSCSIVolume) DeleteVolume(ctx context.Context) {
+cleanUpVolumeServer(ctx, v.f, v.serverPod)
}
// Ceph RBD
@@ -501,7 +501,7 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev
return &pvSource, nil
}
-func (r *rbdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (r *rbdDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: r,
Prefix: "rbd",
@@ -509,12 +509,12 @@ func (r *rbdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes
}
}
-func (r *rbdDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (r *rbdDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
-c, serverPod, secret, serverIP := newRBDServer(cs, ns.Name)
+c, serverPod, secret, serverIP := newRBDServer(ctx, cs, ns.Name)
config.ServerConfig = &c
return &rbdVolume{
serverPod: serverPod,
@@ -524,8 +524,8 @@ func (r *rbdDriver) CreateVolume(config *storageframework.PerTestConfig, volType
}
}
-func (v *rbdVolume) DeleteVolume() {
-cleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret)
+func (v *rbdVolume) DeleteVolume(ctx context.Context) {
+cleanUpVolumeServerWithSecret(ctx, v.f, v.serverPod, v.secret)
}
// Ceph
@@ -616,7 +616,7 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e
}, nil
}
-func (c *cephFSDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (c *cephFSDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: c,
Prefix: "cephfs",
@@ -624,12 +624,12 @@ func (c *cephFSDriver) PrepareTest(f *framework.Framework) *storageframework.Per
}
}
-func (c *cephFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (c *cephFSDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
ns := f.Namespace
-cfg, serverPod, secret, serverIP := newRBDServer(cs, ns.Name)
+cfg, serverPod, secret, serverIP := newRBDServer(ctx, cs, ns.Name)
config.ServerConfig = &cfg
return &cephVolume{
serverPod: serverPod,
@@ -639,8 +639,8 @@ func (c *cephFSDriver) CreateVolume(config *storageframework.PerTestConfig, volT
}
}
-func (v *cephVolume) DeleteVolume() {
-cleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret)
+func (v *cephVolume) DeleteVolume(ctx context.Context) {
+cleanUpVolumeServerWithSecret(ctx, v.f, v.serverPod, v.secret)
}
// Hostpath
@@ -693,7 +693,7 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume
}
}
-func (h *hostPathDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (h *hostPathDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: h,
Prefix: "hostpath",
@@ -701,12 +701,12 @@ func (h *hostPathDriver) PrepareTest(f *framework.Framework) *storageframework.P
}
}
-func (h *hostPathDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (h *hostPathDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
// pods should be scheduled on the node
-node, err := e2enode.GetRandomReadySchedulableNode(cs)
+node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs)
framework.ExpectNoError(err)
config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
return nil
@@ -774,7 +774,7 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2
}
}
-func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (h *hostPathSymlinkDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: h,
Prefix: "hostpathsymlink",
@@ -782,7 +782,7 @@ func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) *storagefram
}
}
-func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (h *hostPathSymlinkDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
cs := f.ClientSet
@@ -791,7 +791,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestCon
volumeName := "test-volume"
// pods should be scheduled on the node
-node, err := e2enode.GetRandomReadySchedulableNode(cs)
+node, err := e2enode.GetRandomReadySchedulableNode(ctx, cs)
framework.ExpectNoError(err)
config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
@@ -835,13 +835,13 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestCon
},
}
// h.prepPod will be reused in cleanupDriver.
-pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), prepPod, metav1.CreateOptions{})
+pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, prepPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating hostPath init pod")
-err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
+err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")
-err = e2epod.DeletePodWithWait(f.ClientSet, pod)
+err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting hostPath init pod")
return &hostPathSymlinkVolume{
sourcePath: sourcePath,
@@ -851,19 +851,19 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestCon
}
}
-func (v *hostPathSymlinkVolume) DeleteVolume() {
+func (v *hostPathSymlinkVolume) DeleteVolume(ctx context.Context) {
f := v.f
cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", v.targetPath, v.sourcePath)
v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd}
-pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), v.prepPod, metav1.CreateOptions{})
+pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, v.prepPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating hostPath teardown pod")
-err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
+err = e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")
-err = e2epod.DeletePodWithWait(f.ClientSet, pod)
+err = e2epod.DeletePodWithWait(ctx, f.ClientSet, pod)
framework.ExpectNoError(err, "while deleting hostPath teardown pod")
}
@@ -911,11 +911,11 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume
}
}
-func (e *emptydirDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (e *emptydirDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
return nil
}
-func (e *emptydirDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (e *emptydirDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: e,
Prefix: "emptydir",
@@ -968,7 +968,7 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern storageframework.TestPattern)
e2eskipper.SkipUnlessProviderIs("openstack")
}
-func (c *cinderDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (c *cinderDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/cinder"
parameters := map[string]string{}
if fsType != "" {
@@ -979,7 +979,7 @@ func (c *cinderDriver) GetDynamicProvisionStorageClass(config *storageframework.
return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
}
-func (c *cinderDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (c *cinderDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: c,
Prefix: "cinder",
@@ -1120,7 +1120,7 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2
return &pvSource, nil
}
-func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (g *gcePdDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/gce-pd"
parameters := map[string]string{}
if fsType != "" {
@@ -1132,7 +1132,7 @@ func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *storageframework.P
return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns)
}
-func (g *gcePdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (g *gcePdDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
config := &storageframework.PerTestConfig{
Driver: g,
Prefix: "gcepd",
@@ -1150,8 +1150,8 @@ func (g *gcePdDriver) PrepareTest(f *framework.Framework) *storageframework.PerT
}
-func (g *gcePdDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
-zone := getInlineVolumeZone(config.Framework)
+func (g *gcePdDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+zone := getInlineVolumeZone(ctx, config.Framework)
if volType == storageframework.InlineVolume {
// PD will be created in framework.TestContext.CloudConfig.Zone zone,
// so pods should be also scheduled there.
@@ -1162,15 +1162,15 @@ func (g *gcePdDriver) CreateVolume(config *storageframework.PerTestConfig, volTy
}
}
ginkgo.By("creating a test gce pd volume")
-vname, err := e2epv.CreatePDWithRetryAndZone(zone)
+vname, err := e2epv.CreatePDWithRetryAndZone(ctx, zone)
framework.ExpectNoError(err)
return &gcePdVolume{
volumeName: vname,
}
}
-func (v *gcePdVolume) DeleteVolume() {
-e2epv.DeletePDWithRetry(v.volumeName)
+func (v *gcePdVolume) DeleteVolume(ctx context.Context) {
+_ = e2epv.DeletePDWithRetry(ctx, v.volumeName)
}
// vSphere
@@ -1269,7 +1269,7 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string,
return &pvSource, nil
}
-func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (v *vSphereDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/vsphere-volume"
parameters := map[string]string{}
if fsType != "" {
@@ -1280,14 +1280,14 @@ func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework
return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
}
-func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (v *vSphereDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
ginkgo.DeferCleanup(func(ctx context.Context) {
// Driver Cleanup function
// Logout each vSphere client connection to prevent session leakage
-nodes := vspheretest.GetReadySchedulableNodeInfos()
+nodes := vspheretest.GetReadySchedulableNodeInfos(ctx)
for _, node := range nodes {
if node.VSphere.Client != nil {
-node.VSphere.Client.Logout(context.TODO())
+_ = node.VSphere.Client.Logout(ctx)
}
}
})
@@ -1298,10 +1298,10 @@ func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.Pe
}
}
-func (v *vSphereDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (v *vSphereDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
f := config.Framework
vspheretest.Bootstrap(f)
-nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
+nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo(ctx)
volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
return &vSphereVolume{
@@ -1310,7 +1310,7 @@ func (v *vSphereDriver) CreateVolume(config *storageframework.PerTestConfig, vol
}
}
-func (v *vSphereVolume) DeleteVolume() {
+func (v *vSphereVolume) DeleteVolume(ctx context.Context) {
v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
}
@@ -1415,7 +1415,7 @@ func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string
return &pvSource, nil
}
-func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (a *azureDiskDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/azure-disk"
parameters := map[string]string{}
if fsType != "" {
@@ -1427,7 +1427,7 @@ func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *storageframewo
return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns)
}
-func (a *azureDiskDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (a *azureDiskDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: a,
Prefix: "azure",
@@ -1435,9 +1435,9 @@ func (a *azureDiskDriver) PrepareTest(f *framework.Framework) *storageframework.
}
}
-func (a *azureDiskDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (a *azureDiskDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
ginkgo.By("creating a test azure disk volume")
-zone := getInlineVolumeZone(config.Framework)
+zone := getInlineVolumeZone(ctx, config.Framework)
if volType == storageframework.InlineVolume {
// PD will be created in framework.TestContext.CloudConfig.Zone zone,
// so pods should be also scheduled there.
@@ -1447,15 +1447,15 @@ func (a *azureDiskDriver) CreateVolume(config *storageframework.PerTestConfig, v
},
}
}
-volumeName, err := e2epv.CreatePDWithRetryAndZone(zone)
+volumeName, err := e2epv.CreatePDWithRetryAndZone(ctx, zone)
framework.ExpectNoError(err)
return &azureDiskVolume{
volumeName: volumeName,
}
}
-func (v *azureDiskVolume) DeleteVolume() {
-e2epv.DeletePDWithRetry(v.volumeName)
+func (v *azureDiskVolume) DeleteVolume(ctx context.Context) {
+_ = e2epv.DeletePDWithRetry(ctx, v.volumeName)
}
// AWS
@@ -1554,7 +1554,7 @@ func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev
return &pvSource, nil
}
-func (a *awsDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (a *awsDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/aws-ebs"
parameters := map[string]string{}
if fsType != "" {
@@ -1566,7 +1566,7 @@ func (a *awsDriver) GetDynamicProvisionStorageClass(config *storageframework.Per
return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns)
}
-func (a *awsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (a *awsDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
config := &storageframework.PerTestConfig{
Driver: a,
Prefix: "aws",
@@ -1583,8 +1583,8 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTes
return config
}
-func (a *awsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
-zone := getInlineVolumeZone(config.Framework)
+func (a *awsDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+zone := getInlineVolumeZone(ctx, config.Framework)
if volType == storageframework.InlineVolume || volType == storageframework.PreprovisionedPV {
// PD will be created in framework.TestContext.CloudConfig.Zone zone,
// so pods should be also scheduled there.
@@ -1595,15 +1595,15 @@ func (a *awsDriver) CreateVolume(config *storageframework.PerTestConfig, volType
}
}
ginkgo.By("creating a test aws volume")
-vname, err := e2epv.CreatePDWithRetryAndZone(zone)
+vname, err := e2epv.CreatePDWithRetryAndZone(ctx, zone)
framework.ExpectNoError(err)
return &awsVolume{
volumeName: vname,
}
}
-func (v *awsVolume) DeleteVolume() {
-e2epv.DeletePDWithRetry(v.volumeName)
+func (v *awsVolume) DeleteVolume(ctx context.Context) {
+_ = e2epv.DeletePDWithRetry(ctx, v.volumeName)
}
// local
@@ -1704,9 +1704,9 @@ func (l *localDriver) GetDriverInfo() *storageframework.DriverInfo {
func (l *localDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
}
-func (l *localDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (l *localDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
var err error
-l.node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+l.node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)
l.hostExec = utils.NewHostExec(f)
@@ -1717,7 +1717,7 @@ func (l *localDriver) PrepareTest(f *framework.Framework) *storageframework.PerT
ssdInterface := "scsi"
filesystemType := "fs"
ssdCmd := fmt.Sprintf("ls -1 /mnt/disks/by-uuid/google-local-ssds-%s-%s/ | wc -l", ssdInterface, filesystemType)
-res, err := l.hostExec.IssueCommandWithResult(ssdCmd, l.node)
+res, err := l.hostExec.IssueCommandWithResult(ctx, ssdCmd, l.node)
framework.ExpectNoError(err)
num, err := strconv.Atoi(strings.TrimSpace(res))
framework.ExpectNoError(err)
@@ -1735,7 +1735,7 @@ func (l *localDriver) PrepareTest(f *framework.Framework) *storageframework.PerT
}
}
-func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (l *localDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
switch volType {
case storageframework.PreprovisionedPV:
node := l.node
@@ -1743,7 +1743,7 @@ func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volTy
config.ClientNodeSelection = e2epod.NodeSelection{Name: node.Name}
return &localVolume{
ltrMgr: l.ltrMgr,
-ltr: l.ltrMgr.Create(node, l.volumeType, nil),
+ltr: l.ltrMgr.Create(ctx, node, l.volumeType, nil),
}
default:
framework.Failf("Unsupported volType: %v is specified", volType)
@@ -1751,8 +1751,8 @@ func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volTy
return nil
}
-func (v *localVolume) DeleteVolume() {
-v.ltrMgr.Remove(v.ltr)
+func (v *localVolume) DeleteVolume(ctx context.Context) {
+v.ltrMgr.Remove(ctx, v.ltr)
}
func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity {
@@ -1795,16 +1795,16 @@ func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2
}
// cleanUpVolumeServer is a wrapper of cleanup function for volume server without secret created by specific CreateStorageServer function.
-func cleanUpVolumeServer(f *framework.Framework, serverPod *v1.Pod) {
-cleanUpVolumeServerWithSecret(f, serverPod, nil)
+func cleanUpVolumeServer(ctx context.Context, f *framework.Framework, serverPod *v1.Pod) {
+cleanUpVolumeServerWithSecret(ctx, f, serverPod, nil)
}
-func getInlineVolumeZone(f *framework.Framework) string {
+func getInlineVolumeZone(ctx context.Context, f *framework.Framework) string {
if framework.TestContext.CloudConfig.Zone != "" {
return framework.TestContext.CloudConfig.Zone
}
// if zone is not specified we will randomly pick a zone from schedulable nodes for inline tests
-node, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
+node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
framework.ExpectNoError(err)
zone, ok := node.Labels[v1.LabelFailureDomainBetaZone]
if ok {
@@ -1814,20 +1814,20 @@ func getInlineVolumeZone(f *framework.Framework) string {
}
// cleanUpVolumeServerWithSecret is a wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function.
-func cleanUpVolumeServerWithSecret(f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) {
+func cleanUpVolumeServerWithSecret(ctx context.Context, f *framework.Framework, serverPod *v1.Pod, secret *v1.Secret) {
cs := f.ClientSet
ns := f.Namespace
if secret != nil {
framework.Logf("Deleting server secret %q...", secret.Name)
-err := cs.CoreV1().Secrets(ns.Name).Delete(context.TODO(), secret.Name, metav1.DeleteOptions{})
+err := cs.CoreV1().Secrets(ns.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{})
if err != nil {
framework.Logf("Delete secret failed: %v", err)
}
}
framework.Logf("Deleting server pod %q...", serverPod.Name)
-err := e2epod.DeletePodWithWait(cs, serverPod)
+err := e2epod.DeletePodWithWait(ctx, cs, serverPod)
if err != nil {
framework.Logf("Server pod delete failed: %v", err)
}
@@ -1916,7 +1916,7 @@ func (a *azureFileDriver) GetPersistentVolumeSource(readOnly bool, fsType string
return &pvSource, nil
}
-func (a *azureFileDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
+func (a *azureFileDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/azure-file"
parameters := map[string]string{}
ns := config.Framework.Namespace.Name
@@ -1924,7 +1924,7 @@ func (a *azureFileDriver) GetDynamicProvisionStorageClass(config *storageframewo
return storageframework.GetStorageClass(provisioner, parameters, &immediateBinding, ns)
}
-func (a *azureFileDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
+func (a *azureFileDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: a,
Prefix: "azure-file",
@@ -1932,7 +1932,7 @@ func (a *azureFileDriver) PrepareTest(f *framework.Framework) *storageframework.
}
}
-func (a *azureFileDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
+func (a *azureFileDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
ginkgo.By("creating a test azure file volume")
accountName, accountKey, shareName, err := e2epv.CreateShare()
framework.ExpectNoError(err)
@@ -1951,7 +1951,7 @@ func (a *azureFileDriver) CreateVolume(config *storageframework.PerTestConfig, v
Type: "Opaque",
}
-_, err = config.Framework.ClientSet.CoreV1().Secrets(config.Framework.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
+_, err = config.Framework.ClientSet.CoreV1().Secrets(config.Framework.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{})
framework.ExpectNoError(err)
return &azureFileVolume{
accountName: accountName,
@@ -1961,7 +1961,7 @@ func (a *azureFileDriver) CreateVolume(config *storageframework.PerTestConfig, v
}
}
-func (v *azureFileVolume) DeleteVolume() {
+func (v *azureFileVolume) DeleteVolume(ctx context.Context) {
err := e2epv.DeleteShare(v.accountName, v.shareName)
framework.ExpectNoError(err)
}