From 1cb121d2a9aa7d2ce8301f15d7c3717e0eb947d2 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 28 Dec 2018 23:22:06 +0100 Subject: [PATCH 1/5] e2e/storage: introduce TestVolume interface This increases type safety and makes the code easier to read because it becomes obvious that the "test resource" passed to some functions must be the result of a previous CreateVolume. This makes it possible to remove: - functions that never did anything (the DeleteVolume methods in drivers that never create a volume) - type casts (in the DeleteVolume implementation) - the unused DeleteVolume parameters - the stand-alone DeleteVolume function (which would be just a non-nil check) GetPersistentVolumeSource and GetVolumeSource could also become methods on more specific interfaces - they don't actually use anything from the TestDriver instance which provides them. The main motivation, however, is to reduce the number of methods which might need an explicit test config parameter. --- test/e2e/storage/drivers/in_tree.go | 395 ++++++++---------- test/e2e/storage/testsuites/base.go | 12 +- .../storage/testsuites/driveroperations.go | 18 +- test/e2e/storage/testsuites/subpath.go | 2 +- test/e2e/storage/testsuites/testdriver.go | 18 +- test/e2e/storage/testsuites/volumemode.go | 11 +- 6 files changed, 203 insertions(+), 253 deletions(-) diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 0cb78a84120..484e1384d16 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -69,9 +69,10 @@ type nfsDriver struct { driverInfo testsuites.DriverInfo } -type nfsTestResource struct { +type nfsVolume struct { serverIP string serverPod *v1.Pod + f *framework.Framework } var _ testsuites.TestDriver = &nfsDriver{} @@ -108,24 +109,24 @@ func (n *nfsDriver) GetDriverInfo() *testsuites.DriverInfo { func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - ntr, ok := testResource.(*nfsTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource") +func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + nv, ok := volume.(*nfsVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume") return &v1.VolumeSource{ NFS: &v1.NFSVolumeSource{ - Server: ntr.serverIP, + Server: nv.serverIP, Path: "/", ReadOnly: readOnly, }, } } -func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - ntr, ok := testResource.(*nfsTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource") +func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + nv, ok := volume.(*nfsVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to NFS test volume") return &v1.PersistentVolumeSource{ NFS: &v1.NFSVolumeSource{ - Server: ntr.serverIP, + Server: nv.serverIP, Path: "/", ReadOnly: readOnly, }, @@ -175,7 +176,7 @@ func (n *nfsDriver) CleanupDriver() { cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0)) } -func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) 
testsuites.TestVolume { f := n.driverInfo.Config.Framework cs := f.ClientSet ns := f.Namespace @@ -189,9 +190,10 @@ func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { case testpatterns.PreprovisionedPV: config, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{}) n.driverInfo.Config.ServerConfig = &config - return &nfsTestResource{ + return &nfsVolume{ serverIP: serverIP, serverPod: serverPod, + f: f, } case testpatterns.DynamicPV: // Do nothing @@ -201,22 +203,8 @@ func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { return nil } -func (n *nfsDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := n.driverInfo.Config.Framework - - ntr, ok := testResource.(*nfsTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to NFS Test Resource") - - switch volType { - case testpatterns.InlineVolume: - fallthrough - case testpatterns.PreprovisionedPV: - framework.CleanUpVolumeServer(f, ntr.serverPod) - case testpatterns.DynamicPV: - // Do nothing - default: - framework.Failf("Unsupported volType:%v is specified", volType) - } +func (v *nfsVolume) DeleteVolume() { + framework.CleanUpVolumeServer(v.f, v.serverPod) } // Gluster @@ -224,9 +212,10 @@ type glusterFSDriver struct { driverInfo testsuites.DriverInfo } -type glusterTestResource struct { +type glusterVolume struct { prefix string serverPod *v1.Pod + f *framework.Framework } var _ testsuites.TestDriver = &glusterFSDriver{} @@ -261,11 +250,11 @@ func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") } -func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - gtr, ok := testResource.(*glusterTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource") +func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + gv, ok := volume.(*glusterVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume") - name := gtr.prefix + "-server" + name := gv.prefix + "-server" return &v1.VolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{ EndpointsName: name, @@ -276,11 +265,11 @@ func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string, testReso } } -func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - gtr, ok := testResource.(*glusterTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource") +func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + gv, ok := volume.(*glusterVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Gluster test volume") - name := gtr.prefix + "-server" + name := gv.prefix + "-server" return &v1.PersistentVolumeSource{ Glusterfs: &v1.GlusterfsPersistentVolumeSource{ EndpointsName: name, @@ -297,28 +286,26 @@ func (g *glusterFSDriver) CreateDriver() { func (g *glusterFSDriver) CleanupDriver() { } -func (g *glusterFSDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (g *glusterFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := g.driverInfo.Config.Framework cs := f.ClientSet ns := f.Namespace config, 
serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name) g.driverInfo.Config.ServerConfig = &config - return &glusterTestResource{ + return &glusterVolume{ prefix: config.Prefix, serverPod: serverPod, + f: f, } } -func (g *glusterFSDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := g.driverInfo.Config.Framework +func (v *glusterVolume) DeleteVolume() { + f := v.f cs := f.ClientSet ns := f.Namespace - gtr, ok := testResource.(*glusterTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Gluster Test Resource") - - name := gtr.prefix + "-server" + name := v.prefix + "-server" framework.Logf("Deleting Gluster endpoints %q...", name) err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) @@ -328,8 +315,8 @@ func (g *glusterFSDriver) DeleteVolume(volType testpatterns.TestVolType, testRes } framework.Logf("Gluster endpoints %q not found, assuming deleted", name) } - framework.Logf("Deleting Gluster server pod %q...", gtr.serverPod.Name) - err = framework.DeletePodWithWait(f, cs, gtr.serverPod) + framework.Logf("Deleting Gluster server pod %q...", v.serverPod.Name) + err = framework.DeletePodWithWait(f, cs, v.serverPod) if err != nil { framework.Failf("Gluster server pod delete failed: %v", err) } @@ -340,9 +327,10 @@ func (g *glusterFSDriver) DeleteVolume(volType testpatterns.TestVolType, testRes type iSCSIDriver struct { driverInfo testsuites.DriverInfo } -type iSCSITestResource struct { +type iSCSIVolume struct { serverPod *v1.Pod serverIP string + f *framework.Framework } var _ testsuites.TestDriver = &iSCSIDriver{} @@ -383,13 +371,13 @@ func (i *iSCSIDriver) GetDriverInfo() *testsuites.DriverInfo { func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - itr, ok := testResource.(*iSCSITestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource") +func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + iv, ok := volume.(*iSCSIVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test volume") volSource := v1.VolumeSource{ ISCSI: &v1.ISCSIVolumeSource{ - TargetPortal: itr.serverIP + ":3260", + TargetPortal: iv.serverIP + ":3260", // from test/images/volume/iscsi/initiatorname.iscsi IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", Lun: 0, @@ -402,13 +390,13 @@ func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string, testResource return &volSource } -func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - itr, ok := testResource.(*iSCSITestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource") +func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + iv, ok := volume.(*iSCSIVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to iSCSI test volume") pvSource := v1.PersistentVolumeSource{ ISCSI: &v1.ISCSIPersistentVolumeSource{ - TargetPortal: itr.serverIP + ":3260", + TargetPortal: iv.serverIP + ":3260", IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", Lun: 0, ReadOnly: readOnly, @@ -426,26 +414,22 @@ func (i *iSCSIDriver) CreateDriver() { func (i *iSCSIDriver) CleanupDriver() { } -func (i 
*iSCSIDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (i *iSCSIDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := i.driverInfo.Config.Framework cs := f.ClientSet ns := f.Namespace config, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name) i.driverInfo.Config.ServerConfig = &config - return &iSCSITestResource{ + return &iSCSIVolume{ serverPod: serverPod, serverIP: serverIP, + f: f, } } -func (i *iSCSIDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := i.driverInfo.Config.Framework - - itr, ok := testResource.(*iSCSITestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to iSCSI Test Resource") - - framework.CleanUpVolumeServer(f, itr.serverPod) +func (v *iSCSIVolume) DeleteVolume() { + framework.CleanUpVolumeServer(v.f, v.serverPod) } // Ceph RBD @@ -453,10 +437,11 @@ type rbdDriver struct { driverInfo testsuites.DriverInfo } -type rbdTestResource struct { +type rbdVolume struct { serverPod *v1.Pod serverIP string secret *v1.Secret + f *framework.Framework } var _ testsuites.TestDriver = &rbdDriver{} @@ -497,18 +482,18 @@ func (r *rbdDriver) GetDriverInfo() *testsuites.DriverInfo { func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - rtr, ok := testResource.(*rbdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to RBD Test Resource") +func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + rv, ok := volume.(*rbdVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume") volSource := v1.VolumeSource{ RBD: &v1.RBDVolumeSource{ - CephMonitors: []string{rtr.serverIP}, + CephMonitors: []string{rv.serverIP}, RBDPool: "rbd", RBDImage: "foo", RadosUser: "admin", SecretRef: &v1.LocalObjectReference{ - Name: rtr.secret.Name, + Name: rv.secret.Name, }, ReadOnly: readOnly, }, @@ -519,21 +504,21 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, testResource i return &volSource } -func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { f := r.driverInfo.Config.Framework ns := f.Namespace - rtr, ok := testResource.(*rbdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to RBD Test Resource") + rv, ok := volume.(*rbdVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume") pvSource := v1.PersistentVolumeSource{ RBD: &v1.RBDPersistentVolumeSource{ - CephMonitors: []string{rtr.serverIP}, + CephMonitors: []string{rv.serverIP}, RBDPool: "rbd", RBDImage: "foo", RadosUser: "admin", SecretRef: &v1.SecretReference{ - Name: rtr.secret.Name, + Name: rv.secret.Name, Namespace: ns.Name, }, ReadOnly: readOnly, @@ -551,27 +536,23 @@ func (r *rbdDriver) CreateDriver() { func (r *rbdDriver) CleanupDriver() { } -func (r *rbdDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (r *rbdDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := r.driverInfo.Config.Framework cs := f.ClientSet ns := f.Namespace config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) 
r.driverInfo.Config.ServerConfig = &config - return &rbdTestResource{ + return &rbdVolume{ serverPod: serverPod, serverIP: serverIP, secret: secret, + f: f, } } -func (r *rbdDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := r.driverInfo.Config.Framework - - rtr, ok := testResource.(*rbdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to RBD Test Resource") - - framework.CleanUpVolumeServerWithSecret(f, rtr.serverPod, rtr.secret) +func (v *rbdVolume) DeleteVolume() { + framework.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) } // Ceph @@ -583,10 +564,11 @@ type cephFSDriver struct { driverInfo testsuites.DriverInfo } -type cephTestResource struct { +type cephVolume struct { serverPod *v1.Pod serverIP string secret *v1.Secret + f *framework.Framework } var _ testsuites.TestDriver = &cephFSDriver{} @@ -621,35 +603,35 @@ func (c *cephFSDriver) GetDriverInfo() *testsuites.DriverInfo { func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - ctr, ok := testResource.(*cephTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Ceph Test Resource") +func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + cv, ok := volume.(*cephVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume") return &v1.VolumeSource{ CephFS: &v1.CephFSVolumeSource{ - Monitors: []string{ctr.serverIP + ":6789"}, + Monitors: []string{cv.serverIP + ":6789"}, User: "kube", SecretRef: &v1.LocalObjectReference{ - Name: ctr.secret.Name, + Name: cv.secret.Name, }, ReadOnly: readOnly, }, } } -func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { f := c.driverInfo.Config.Framework ns := f.Namespace - ctr, ok := testResource.(*cephTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Ceph Test Resource") + cv, ok := volume.(*cephVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume") return &v1.PersistentVolumeSource{ CephFS: &v1.CephFSPersistentVolumeSource{ - Monitors: []string{ctr.serverIP + ":6789"}, + Monitors: []string{cv.serverIP + ":6789"}, User: "kube", SecretRef: &v1.SecretReference{ - Name: ctr.secret.Name, + Name: cv.secret.Name, Namespace: ns.Name, }, ReadOnly: readOnly, @@ -663,27 +645,23 @@ func (c *cephFSDriver) CreateDriver() { func (c *cephFSDriver) CleanupDriver() { } -func (c *cephFSDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (c *cephFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := c.driverInfo.Config.Framework cs := f.ClientSet ns := f.Namespace config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) c.driverInfo.Config.ServerConfig = &config - return &cephTestResource{ + return &cephVolume{ serverPod: serverPod, serverIP: serverIP, secret: secret, + f: f, } } -func (c *cephFSDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := c.driverInfo.Config.Framework - - ctr, ok := testResource.(*cephTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Ceph 
Test Resource") - - framework.CleanUpVolumeServerWithSecret(f, ctr.serverPod, ctr.secret) +func (v *cephVolume) DeleteVolume() { + framework.CleanUpVolumeServerWithSecret(v.f, v.serverPod, v.secret) } // Hostpath @@ -722,7 +700,7 @@ func (h *hostPathDriver) GetDriverInfo() *testsuites.DriverInfo { func (h *hostPathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { +func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { // hostPath doesn't support readOnly volume if readOnly { return nil @@ -740,7 +718,7 @@ func (h *hostPathDriver) CreateDriver() { func (h *hostPathDriver) CleanupDriver() { } -func (h *hostPathDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (h *hostPathDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := h.driverInfo.Config.Framework cs := f.ClientSet @@ -751,9 +729,6 @@ func (h *hostPathDriver) CreateVolume(volType testpatterns.TestVolType) interfac return nil } -func (h *hostPathDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { -} - // HostPathSymlink type hostPathSymlinkDriver struct { node v1.Node @@ -761,10 +736,11 @@ type hostPathSymlinkDriver struct { driverInfo testsuites.DriverInfo } -type hostPathSymlinkTestResource struct { +type hostPathSymlinkVolume struct { targetPath string sourcePath string prepPod *v1.Pod + f *framework.Framework } var _ testsuites.TestDriver = &hostPathSymlinkDriver{} @@ -796,9 +772,9 @@ func (h *hostPathSymlinkDriver) GetDriverInfo() *testsuites.DriverInfo { func (h *hostPathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - htr, ok := testResource.(*hostPathSymlinkTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Hostpath Symlink Test Resource") +func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + hv, ok := volume.(*hostPathSymlinkVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Hostpath Symlink test volume") // hostPathSymlink doesn't support readOnly volume if readOnly { @@ -806,7 +782,7 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, te } return &v1.VolumeSource{ HostPath: &v1.HostPathVolumeSource{ - Path: htr.targetPath, + Path: hv.targetPath, }, } } @@ -817,7 +793,7 @@ func (h *hostPathSymlinkDriver) CreateDriver() { func (h *hostPathSymlinkDriver) CleanupDriver() { } -func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := h.driverInfo.Config.Framework cs := f.ClientSet @@ -878,23 +854,21 @@ func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) i err = framework.DeletePodWithWait(f, f.ClientSet, pod) Expect(err).ToNot(HaveOccurred(), "while deleting hostPath init pod") - return &hostPathSymlinkTestResource{ + return &hostPathSymlinkVolume{ sourcePath: sourcePath, targetPath: targetPath, prepPod: prepPod, + f: f, } } -func (h *hostPathSymlinkDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - f := h.driverInfo.Config.Framework +func (v *hostPathSymlinkVolume) 
DeleteVolume() { + f := v.f - htr, ok := testResource.(*hostPathSymlinkTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Hostpath Symlink Test Resource") + cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", v.targetPath, v.sourcePath) + v.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd} - cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", htr.targetPath, htr.sourcePath) - htr.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd} - - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(htr.prepPod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(v.prepPod) Expect(err).ToNot(HaveOccurred(), "while creating hostPath teardown pod") err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) @@ -938,7 +912,7 @@ func (e *emptydirDriver) GetDriverInfo() *testsuites.DriverInfo { func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { +func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { // emptydir doesn't support readOnly volume if readOnly { return nil @@ -948,13 +922,10 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, testResou } } -func (e *emptydirDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (e *emptydirDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { return nil } -func (e *emptydirDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { -} - func (e *emptydirDriver) CreateDriver() { } @@ -970,7 +941,7 @@ type cinderDriver struct { driverInfo testsuites.DriverInfo } -type cinderTestResource struct { +type cinderVolume struct { volumeName string volumeID string } @@ -1010,13 +981,13 @@ func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { framework.SkipUnlessProviderIs("openstack") } -func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - ctr, ok := testResource.(*cinderTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource") +func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + cv, ok := volume.(*cinderVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume") volSource := v1.VolumeSource{ Cinder: &v1.CinderVolumeSource{ - VolumeID: ctr.volumeID, + VolumeID: cv.volumeID, ReadOnly: readOnly, }, } @@ -1026,13 +997,13 @@ func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string, testResourc return &volSource } -func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - ctr, ok := testResource.(*cinderTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource") +func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + cv, ok := volume.(*cinderVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Cinder test volume") pvSource := v1.PersistentVolumeSource{ Cinder: &v1.CinderPersistentVolumeSource{ - VolumeID: ctr.volumeID, + VolumeID: cv.volumeID, ReadOnly: readOnly, }, } @@ -1064,7 +1035,7 @@ func (c 
*cinderDriver) CreateDriver() { func (c *cinderDriver) CleanupDriver() { } -func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := c.driverInfo.Config.Framework ns := f.Namespace @@ -1095,20 +1066,15 @@ func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) interface{ } framework.Logf("Volume ID: %s", volumeID) Expect(volumeID).NotTo(Equal("")) - return &cinderTestResource{ + return &cinderVolume{ volumeName: volumeName, volumeID: volumeID, } } -func (c *cinderDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - ctr, ok := testResource.(*cinderTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Cinder Test Resource") +func (v *cinderVolume) DeleteVolume() { + name := v.volumeName - deleteCinderVolume(ctr.volumeName) -} - -func deleteCinderVolume(name string) error { // Try to delete the volume for several seconds - it takes // a while for the plugin to detach it. var output []byte @@ -1120,12 +1086,11 @@ func deleteCinderVolume(name string) error { output, err = exec.Command("cinder", "delete", name).CombinedOutput() if err == nil { framework.Logf("Cinder volume %s deleted", name) - return nil + return } framework.Logf("Failed to delete volume %s: %v", name, err) } framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) - return err } // GCE @@ -1133,7 +1098,7 @@ type gcePdDriver struct { driverInfo testsuites.DriverInfo } -type gcePdTestResource struct { +type gcePdVolume struct { volumeName string } @@ -1177,12 +1142,12 @@ func (g *gcePdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { framework.SkipUnlessProviderIs("gce", "gke") } -func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - gtr, ok := testResource.(*gcePdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource") +func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + gv, ok := volume.(*gcePdVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume") volSource := v1.VolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ - PDName: gtr.volumeName, + PDName: gv.volumeName, ReadOnly: readOnly, }, } @@ -1192,12 +1157,12 @@ func (g *gcePdDriver) GetVolumeSource(readOnly bool, fsType string, testResource return &volSource } -func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - gtr, ok := testResource.(*gcePdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource") +func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + gv, ok := volume.(*gcePdVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to GCE PD test volume") pvSource := v1.PersistentVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ - PDName: gtr.volumeName, + PDName: gv.volumeName, ReadOnly: readOnly, }, } @@ -1229,7 +1194,7 @@ func (g *gcePdDriver) CreateDriver() { func (g *gcePdDriver) CleanupDriver() { } -func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) 
testsuites.TestVolume { if volType == testpatterns.InlineVolume { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. @@ -1240,15 +1205,13 @@ func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) interface{} By("creating a test gce pd volume") vname, err := framework.CreatePDWithRetry() Expect(err).NotTo(HaveOccurred()) - return &gcePdTestResource{ + return &gcePdVolume{ volumeName: vname, } } -func (g *gcePdDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - gtr, ok := testResource.(*gcePdTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to GCE PD Test Resource") - framework.DeletePDWithRetry(gtr.volumeName) +func (v *gcePdVolume) DeleteVolume() { + framework.DeletePDWithRetry(v.volumeName) } // vSphere @@ -1256,7 +1219,7 @@ type vSphereDriver struct { driverInfo testsuites.DriverInfo } -type vSphereTestResource struct { +type vSphereVolume struct { volumePath string nodeInfo *vspheretest.NodeInfo } @@ -1295,9 +1258,9 @@ func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { framework.SkipUnlessProviderIs("vsphere") } -func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - vtr, ok := testResource.(*vSphereTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource") +func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + vsv, ok := volume.(*vSphereVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume") // vSphere driver doesn't seem to support readOnly volume // TODO: check if it is correct @@ -1306,7 +1269,7 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResour } volSource := v1.VolumeSource{ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ - VolumePath: vtr.volumePath, + VolumePath: vsv.volumePath, }, } if fsType != "" { @@ -1315,9 +1278,9 @@ func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string, testResour return &volSource } -func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - vtr, ok := testResource.(*vSphereTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource") +func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + vsv, ok := volume.(*vSphereVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to vSphere test volume") // vSphere driver doesn't seem to support readOnly volume // TODO: check if it is correct @@ -1326,7 +1289,7 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, } pvSource := v1.PersistentVolumeSource{ VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ - VolumePath: vtr.volumePath, + VolumePath: vsv.volumePath, }, } if fsType != "" { @@ -1357,23 +1320,20 @@ func (v *vSphereDriver) CreateDriver() { func (v *vSphereDriver) CleanupDriver() { } -func (v *vSphereDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (v *vSphereDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { f := v.driverInfo.Config.Framework vspheretest.Bootstrap(f) nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo() volumePath, err := 
nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef) Expect(err).NotTo(HaveOccurred()) - return &vSphereTestResource{ + return &vSphereVolume{ volumePath: volumePath, nodeInfo: nodeInfo, } } -func (v *vSphereDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - vtr, ok := testResource.(*vSphereTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to vSphere Test Resource") - - vtr.nodeInfo.VSphere.DeleteVolume(vtr.volumePath, vtr.nodeInfo.DataCenterRef) +func (v *vSphereVolume) DeleteVolume() { + v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef) } // Azure @@ -1381,7 +1341,7 @@ type azureDriver struct { driverInfo testsuites.DriverInfo } -type azureTestResource struct { +type azureVolume struct { volumeName string } @@ -1421,16 +1381,16 @@ func (a *azureDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { framework.SkipUnlessProviderIs("azure") } -func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { - atr, ok := testResource.(*azureTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource") +func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { + av, ok := volume.(*azureVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume") - diskName := atr.volumeName[(strings.LastIndex(atr.volumeName, "/") + 1):] + diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] volSource := v1.VolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ DiskName: diskName, - DataDiskURI: atr.volumeName, + DataDiskURI: av.volumeName, ReadOnly: &readOnly, }, } @@ -1440,16 +1400,16 @@ func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string, testResource return &volSource } -func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - atr, ok := testResource.(*azureTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource") +func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + av, ok := volume.(*azureVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to Azure test volume") - diskName := atr.volumeName[(strings.LastIndex(atr.volumeName, "/") + 1):] + diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):] pvSource := v1.PersistentVolumeSource{ AzureDisk: &v1.AzureDiskVolumeSource{ DiskName: diskName, - DataDiskURI: atr.volumeName, + DataDiskURI: av.volumeName, ReadOnly: &readOnly, }, } @@ -1481,20 +1441,17 @@ func (a *azureDriver) CreateDriver() { func (a *azureDriver) CleanupDriver() { } -func (a *azureDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (a *azureDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { By("creating a test azure disk volume") volumeName, err := framework.CreatePDWithRetry() Expect(err).NotTo(HaveOccurred()) - return &azureTestResource{ + return &azureVolume{ volumeName: volumeName, } } -func (a *azureDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - atr, ok := testResource.(*azureTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to Azure Test Resource") - - framework.DeletePDWithRetry(atr.volumeName) +func 
(v *azureVolume) DeleteVolume() { + framework.DeletePDWithRetry(v.volumeName) } // AWS @@ -1545,7 +1502,7 @@ func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { // TODO: Fix authorization error in attach operation and uncomment below /* -func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource { +func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) *v1.VolumeSource { volSource := v1.VolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ VolumeID: a.volumeName, @@ -1558,7 +1515,7 @@ func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string, testResource i return &volSource } -func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { +func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { pvSource := v1.PersistentVolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ VolumeID: a.volumeName, @@ -1596,14 +1553,14 @@ func (a *awsDriver) CleanupDriver() { // TODO: Fix authorization error in attach operation and uncomment below /* -func (a *awsDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (a *awsDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { By("creating a test aws volume") var err error a.volumeName, err = framework.CreatePDWithRetry() Expect(err).NotTo(HaveOccurred()) } -func (a *awsDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { +DeleteVolume() { framework.DeletePDWithRetry(a.volumeName) } */ @@ -1619,6 +1576,11 @@ type localDriver struct { ltrMgr utils.LocalTestResourceManager } +type localVolume struct { + ltrMgr utils.LocalTestResourceManager + ltr *utils.LocalTestResource +} + var ( // capabilities defaultLocalVolumeCapabilities = map[testsuites.Capability]bool{ @@ -1727,29 +1689,24 @@ func (l *localDriver) randomNode() *v1.Node { return &node } -func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) interface{} { +func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { switch volType { case testpatterns.PreprovisionedPV: node := l.node // assign this to schedule pod on this node l.driverInfo.Config.ClientNodeName = node.Name - return l.ltrMgr.Create(node, l.volumeType, nil) + return &localVolume{ + ltrMgr: l.ltrMgr, + ltr: l.ltrMgr.Create(node, l.volumeType, nil), + } default: framework.Failf("Unsupported volType: %v is specified", volType) } return nil } -func (l *localDriver) DeleteVolume(volType testpatterns.TestVolType, testResource interface{}) { - ltr, ok := testResource.(*utils.LocalTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to local Test Resource") - switch volType { - case testpatterns.PreprovisionedPV: - l.ltrMgr.Remove(ltr) - default: - framework.Failf("Unsupported volType: %v is specified", volType) - } - return +func (v *localVolume) DeleteVolume() { + v.ltrMgr.Remove(v.ltr) } func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity { @@ -1778,13 +1735,13 @@ func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity } } -func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - ltr, ok := 
testResource.(*utils.LocalTestResource) - Expect(ok).To(BeTrue(), "Failed to cast test resource to local Test Resource") +func (l *localDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { + lv, ok := volume.(*localVolume) + Expect(ok).To(BeTrue(), "Failed to cast test volume to local test volume") return &v1.PersistentVolumeSource{ Local: &v1.LocalVolumeSource{ - Path: ltr.Path, + Path: lv.ltr.Path, FSType: &fsType, }, - }, l.nodeAffinityForNode(ltr.Node) + }, l.nodeAffinityForNode(lv.ltr.Node) } diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 75425b98d33..e0c9e65f5be 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -147,7 +147,7 @@ type genericVolumeTestResource struct { pv *v1.PersistentVolume sc *storagev1.StorageClass - driverTestResource interface{} + volume TestVolume } var _ TestResource = &genericVolumeTestResource{} @@ -162,19 +162,19 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes volType := pattern.VolType // Create volume for pre-provisioned volume tests - r.driverTestResource = CreateVolume(driver, volType) + r.volume = CreateVolume(driver, volType) switch volType { case testpatterns.InlineVolume: framework.Logf("Creating resource for inline volume") if iDriver, ok := driver.(InlineVolumeTestDriver); ok { - r.volSource = iDriver.GetVolumeSource(false, fsType, r.driverTestResource) + r.volSource = iDriver.GetVolumeSource(false, fsType, r.volume) r.volType = dInfo.Name } case testpatterns.PreprovisionedPV: framework.Logf("Creating resource for pre-provisioned PV") if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { - pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.driverTestResource) + pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, fsType, r.volume) if pvSource != nil { r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, false) } @@ -241,7 +241,9 @@ func (r *genericVolumeTestResource) cleanupResource(driver TestDriver, pattern t } // Cleanup volume for pre-provisioned volume tests - DeleteVolume(driver, volType, r.driverTestResource) + if r.volume != nil { + r.volume.DeleteVolume() + } } func createVolumeSourceWithPVCPV( diff --git a/test/e2e/storage/testsuites/driveroperations.go b/test/e2e/storage/testsuites/driveroperations.go index e0adbc06007..45132bfa803 100644 --- a/test/e2e/storage/testsuites/driveroperations.go +++ b/test/e2e/storage/testsuites/driveroperations.go @@ -37,7 +37,7 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string { } // CreateVolume creates volume for test unless dynamicPV test -func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface{} { +func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) TestVolume { switch volType { case testpatterns.InlineVolume: fallthrough @@ -53,22 +53,6 @@ func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) interface return nil } -// DeleteVolume deletes volume for test unless dynamicPV test -func DeleteVolume(driver TestDriver, volType testpatterns.TestVolType, testResource interface{}) { - switch volType { - case testpatterns.InlineVolume: - fallthrough - case testpatterns.PreprovisionedPV: - if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok { - pDriver.DeleteVolume(volType, testResource) - } - case 
testpatterns.DynamicPV: - // No need to delete volume - default: - framework.Failf("Invalid volType specified: %v", volType) - } -} - // GetStorageClass constructs a new StorageClass instance // with a unique name that is based on namespace + suffix. func GetStorageClass( diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 3e35e2ecda2..072182e838a 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -150,7 +150,7 @@ func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatte switch volType { case testpatterns.InlineVolume: if iDriver, ok := driver.(InlineVolumeTestDriver); ok { - s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.driverTestResource) + s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.volume) } case testpatterns.PreprovisionedPV: s.roVolSource = &v1.VolumeSource{ diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go index 4e35afc2da8..e1880af4841 100644 --- a/test/e2e/storage/testsuites/testdriver.go +++ b/test/e2e/storage/testsuites/testdriver.go @@ -38,22 +38,28 @@ type TestDriver interface { SkipUnsupportedTest(testpatterns.TestPattern) } +// TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume. +// The only common functionality is to delete it. Individual driver interfaces +// have additional methods that work with volumes created by them. +type TestVolume interface { + DeleteVolume() +} + // PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volume type PreprovisionedVolumeTestDriver interface { TestDriver - // CreateVolume creates a pre-provisioned volume. - CreateVolume(testpatterns.TestVolType) interface{} - // DeleteVolume deletes a volume that is created in CreateVolume - DeleteVolume(testpatterns.TestVolType, interface{}) + // CreateVolume creates a pre-provisioned volume of the desired volume type. + CreateVolume(volumeType testpatterns.TestVolType) TestVolume } // InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume type InlineVolumeTestDriver interface { PreprovisionedVolumeTestDriver + // GetVolumeSource returns a volumeSource for inline volume. // It will set readOnly and fsType to the volumeSource, if TestDriver supports both of them. // It will return nil, if the TestDriver doesn't support either of the parameters. - GetVolumeSource(readOnly bool, fsType string, testResource interface{}) *v1.VolumeSource + GetVolumeSource(readOnly bool, fsType string, testVolume TestVolume) *v1.VolumeSource } // PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV @@ -63,7 +69,7 @@ type PreprovisionedPVTestDriver interface { // It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them. // It will return nil, if the TestDriver doesn't support either of the parameters. // Volume node affinity is optional, it will be nil for volumes which does not have volume node affinity. 
- GetPersistentVolumeSource(readOnly bool, fsType string, testResource interface{}) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) + GetPersistentVolumeSource(readOnly bool, fsType string, testVolume TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) } // DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 6b3ea843488..7ac60385eef 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -146,7 +146,7 @@ type volumeModeTestResource struct { pvc *v1.PersistentVolumeClaim pv *v1.PersistentVolume - driverTestResource interface{} + volume TestVolume } var _ TestResource = &volumeModeTestResource{} @@ -168,7 +168,7 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa ) // Create volume for pre-provisioned volume tests - s.driverTestResource = CreateVolume(driver, volType) + s.volume = CreateVolume(driver, volType) switch volType { case testpatterns.PreprovisionedPV: @@ -178,7 +178,7 @@ func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpa scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name) } if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { - pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.driverTestResource) + pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.volume) if pvSource == nil { framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name) } @@ -211,7 +211,6 @@ func (s *volumeModeTestResource) cleanupResource(driver TestDriver, pattern test f := dInfo.Config.Framework cs := f.ClientSet ns := f.Namespace - volType := pattern.VolType By("Deleting pv and pvc") errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc) @@ -224,7 +223,9 @@ func (s *volumeModeTestResource) cleanupResource(driver TestDriver, pattern test } // Cleanup volume for pre-provisioned volume tests - DeleteVolume(driver, volType, s.driverTestResource) + if s.volume != nil { + s.volume.DeleteVolume() + } } type volumeModeTestInput struct { From 05cc31697f811f3df62321a8a66363c144fd2fc7 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Sat, 29 Dec 2018 17:08:34 +0100 Subject: [PATCH 2/5] e2e/storage: speed up skipping, simplify APIs and test definition CreateDriver (now called PrepareTest) is a potentially expensive operation, depending on the driver. Creating and tearing down a framework instance also takes time (measured at 6 seconds on a fast machine) and produces quite a bit of log output. Both can be avoided for tests that skip based on static information (for instance, the current OS, vendor, driver and test pattern) by making the test suite responsible for creating the framework and driver. The lifecycle of the TestConfig instance was confusing because it was stored inside the DriverInfo, a struct which is conceptually static, while the TestConfig is dynamic. It is cleaner to separate the two, even if that means that an additional pointer must be passed into some functions. Now PrepareTest is responsible for initializing the PerTestConfig that is to be used by the test. To make this approach simpler to implement (= fewer functions which need the pointer) and the tests easier to read, the entire setup and test definition is now contained in a single function. This is how it is normally done in Ginkgo. 
This is easier to read because one can see at a glance where variables are set, instead of having to trace values through two additional structs (TestResource and TestInput). Because we are changing the API already, other changes are made as well: - some function prototypes get simplified - the naming of functions is changed to match their purpose (tests aren't executed by the test suite, they only get defined for later execution) - unused methods get removed (TestSuite.skipUnsupportedTest is redundant) --- test/e2e/storage/csi_volumes.go | 191 +++--- test/e2e/storage/drivers/csi.go | 164 +++--- test/e2e/storage/drivers/in_tree.go | 340 ++++++----- test/e2e/storage/in_tree_volumes.go | 33 +- test/e2e/storage/regional_pd.go | 37 +- test/e2e/storage/testsuites/base.go | 109 +++- .../storage/testsuites/driveroperations.go | 9 +- test/e2e/storage/testsuites/provisioning.go | 296 +++++----- test/e2e/storage/testsuites/snapshottable.go | 317 ++++------- test/e2e/storage/testsuites/subpath.go | 384 ++++++------- test/e2e/storage/testsuites/testdriver.go | 59 +- test/e2e/storage/testsuites/volume_io.go | 118 ++-- test/e2e/storage/testsuites/volumemode.go | 531 ++++++++---------- test/e2e/storage/testsuites/volumes.go | 135 ++--- test/e2e/storage/volume_provisioning.go | 71 ++- 15 files changed, 1278 insertions(+), 1516 deletions(-) diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go index 2ab9ca97026..f6bb547135a 100644 --- a/test/e2e/storage/csi_volumes.go +++ b/test/e2e/storage/csi_volumes.go @@ -17,10 +17,8 @@ limitations under the License. package storage import ( - "context" "encoding/json" "fmt" - "regexp" "strings" "time" @@ -32,7 +30,6 @@ import ( clientset "k8s.io/client-go/kubernetes" csiclient "k8s.io/csi-api/pkg/client/clientset/versioned" "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/framework/podlogs" "k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" @@ -47,9 +44,9 @@ import ( ) // List of testDrivers to be executed in below loop -var csiTestDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{ +var csiTestDrivers = []func() testsuites.TestDriver{ drivers.InitHostPathCSIDriver, - drivers.InitGcePDCSIDriver, + func() testsuites.TestDriver { return drivers.InitGcePDCSIDriver(false /* topology enabled */) }, drivers.InitGcePDExternalCSIDriver, drivers.InitHostPathV0CSIDriver, // Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage. @@ -81,117 +78,56 @@ func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPatt // This executes testSuites for csi volumes. var _ = utils.SIGDescribe("CSI Volumes", func() { - f := framework.NewDefaultFramework("csi-volumes") - - var ( - cancel context.CancelFunc - cs clientset.Interface - csics csiclient.Interface - ns *v1.Namespace - // Common configuration options for each driver. - config = testsuites.TestConfig{ - Framework: f, - Prefix: "csi", - } - ) - - BeforeEach(func() { - ctx, c := context.WithCancel(context.Background()) - cancel = c - cs = f.ClientSet - csics = f.CSIClientSet - ns = f.Namespace - - // Debugging of the following tests heavily depends on the log output - // of the different containers. Therefore include all of that in log - // files (when using --report-dir, as in the CI) or the output stream - // (otherwise). 
- to := podlogs.LogOutput{ - StatusWriter: GinkgoWriter, - } - if framework.TestContext.ReportDir == "" { - to.LogWriter = GinkgoWriter - } else { - test := CurrentGinkgoTestDescription() - reg := regexp.MustCompile("[^a-zA-Z0-9_-]+") - // We end the prefix with a slash to ensure that all logs - // end up in a directory named after the current test. - to.LogPathPrefix = framework.TestContext.ReportDir + "/" + - reg.ReplaceAllString(test.FullTestText, "_") + "/" - } - podlogs.CopyAllLogs(ctx, cs, ns.Name, to) - - // pod events are something that the framework already collects itself - // after a failed test. Logging them live is only useful for interactive - // debugging, not when we collect reports. - if framework.TestContext.ReportDir == "" { - podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter) - } - }) - - AfterEach(func() { - cancel() - }) - for _, initDriver := range csiTestDrivers { - curDriver := initDriver(config) - curConfig := curDriver.GetDriverInfo().Config + curDriver := initDriver() + Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { - BeforeEach(func() { - // Reset config. The driver might have modified its copy - // in a previous test. - curDriver.GetDriverInfo().Config = curConfig - - // setupDriver - curDriver.CreateDriver() - }) - - AfterEach(func() { - // Cleanup driver - curDriver.CleanupDriver() - }) - - testsuites.RunTestSuite(f, curDriver, csiTestSuites, csiTunePattern) + testsuites.DefineTestSuite(curDriver, csiTestSuites, csiTunePattern) }) } Context("CSI Topology test using GCE PD driver [Feature:CSINodeInfo]", func() { - newConfig := config - newConfig.TopologyEnabled = true - driver := drivers.InitGcePDCSIDriver(newConfig).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite. + f := framework.NewDefaultFramework("csitopology") + driver := drivers.InitGcePDCSIDriver(true /* topology enabled */).(testsuites.DynamicPVTestDriver) // TODO (#71289) eliminate by moving this test to common test suite. 
+ var ( + config *testsuites.PerTestConfig + testCleanup func() + ) BeforeEach(func() { - driver.CreateDriver() + config, testCleanup = driver.PrepareTest(f) }) AfterEach(func() { - driver.CleanupDriver() + if testCleanup != nil { + testCleanup() + } }) It("should provision zonal PD with immediate volume binding and AllowedTopologies set and mount the volume to a pod", func() { suffix := "topology-positive" - testTopologyPositive(cs, suffix, ns.GetName(), false /* delayBinding */, true /* allowedTopologies */) + testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */, true /* allowedTopologies */) }) It("should provision zonal PD with delayed volume binding and mount the volume to a pod", func() { suffix := "delayed" - testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, false /* allowedTopologies */) + testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, false /* allowedTopologies */) }) It("should provision zonal PD with delayed volume binding and AllowedTopologies set and mount the volume to a pod", func() { suffix := "delayed-topology-positive" - testTopologyPositive(cs, suffix, ns.GetName(), true /* delayBinding */, true /* allowedTopologies */) + testTopologyPositive(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */, true /* allowedTopologies */) }) It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with immediate volume binding", func() { - framework.SkipUnlessMultizone(cs) + framework.SkipUnlessMultizone(config.Framework.ClientSet) suffix := "topology-negative" - testTopologyNegative(cs, suffix, ns.GetName(), false /* delayBinding */) + testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), false /* delayBinding */) }) It("should fail to schedule a pod with a zone missing from AllowedTopologies; PD is provisioned with delayed volume binding", func() { - framework.SkipUnlessMultizone(cs) + framework.SkipUnlessMultizone(config.Framework.ClientSet) suffix := "delayed-topology-negative" - testTopologyNegative(cs, suffix, ns.GetName(), true /* delayBinding */) + testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */) }) }) @@ -227,29 +163,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { for _, t := range tests { test := t - It(test.name, func() { - By("Deploying mock CSI driver") - config := testsuites.TestConfig{ - Framework: f, - Prefix: "csi-attach", - } + f := framework.NewDefaultFramework("csiattach") - driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, test.driverAttachable, nil) - driver.CreateDriver() - defer driver.CleanupDriver() + It(test.name, func() { + cs := f.ClientSet + csics := f.CSIClientSet + ns := f.Namespace + + driver = drivers.InitMockCSIDriver(test.deployDriverCRD, test.driverAttachable, nil) + config, testCleanup := driver.PrepareTest(f) + driverName := config.GetUniqueDriverName() + defer testCleanup() if test.deployDriverCRD { - err = waitForCSIDriver(csics, driver) + err = waitForCSIDriver(csics, driverName) framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err) - defer destroyCSIDriver(csics, driver) + defer destroyCSIDriver(csics, driverName) } By("Creating pod") var sc *storagev1.StorageClass if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok { - sc = 
dDriver.GetDynamicProvisionStorageClass("") + sc = dDriver.GetDynamicProvisionStorageClass(config, "") } - nodeName := driver.GetDriverInfo().Config.ClientNodeName + nodeName := config.ClientNodeName scTest := testsuites.StorageClassTest{ Name: driver.GetDriverInfo().Name, Provisioner: sc.Provisioner, @@ -347,29 +284,30 @@ var _ = utils.SIGDescribe("CSI Volumes", func() { } for _, t := range tests { test := t - It(test.name, func() { - By("Deploying mock CSI driver") - config := testsuites.TestConfig{ - Framework: f, - Prefix: "csi-workload", - } + f := framework.NewDefaultFramework("csiworkload") - driver = drivers.InitMockCSIDriver(config, test.deployDriverCRD, true, test.podInfoOnMountVersion) - driver.CreateDriver() - defer driver.CleanupDriver() + It(test.name, func() { + cs := f.ClientSet + csics := f.CSIClientSet + ns := f.Namespace + + driver = drivers.InitMockCSIDriver(test.deployDriverCRD, true, test.podInfoOnMountVersion) + config, testCleanup := driver.PrepareTest(f) + driverName := config.GetUniqueDriverName() + defer testCleanup() if test.deployDriverCRD { - err = waitForCSIDriver(csics, driver) + err = waitForCSIDriver(csics, driverName) framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err) - defer destroyCSIDriver(csics, driver) + defer destroyCSIDriver(csics, driverName) } By("Creating pod") var sc *storagev1.StorageClass if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok { - sc = dDriver.GetDynamicProvisionStorageClass("") + sc = dDriver.GetDynamicProvisionStorageClass(config, "") } - nodeName := driver.GetDriverInfo().Config.ClientNodeName + nodeName := config.ClientNodeName scTest := testsuites.StorageClassTest{ Name: driver.GetDriverInfo().Name, Parameters: sc.Parameters, @@ -420,14 +358,16 @@ func testTopologyPositive(cs clientset.Interface, suffix, namespace string, dela topoZone := getRandomClusterZone(cs) addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, topoZone) } - claim := newClaim(test, namespace, suffix) - claim.Spec.StorageClassName = &class.Name + test.Client = cs + test.Claim = newClaim(test, namespace, suffix) + test.Claim.Spec.StorageClassName = &class.Name + test.Class = class if delayBinding { - _, node := testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nil /* node selector */, false /* expect unschedulable */) + _, node := test.TestBindingWaitForFirstConsumer(nil /* node selector */, false /* expect unschedulable */) Expect(node).ToNot(BeNil(), "Unexpected nil node found") } else { - testsuites.TestDynamicProvisioning(test, cs, claim, class) + test.TestDynamicProvisioning() } } @@ -447,12 +387,13 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela test.DelayBinding = delayBinding nodeSelector := map[string]string{v1.LabelZoneFailureDomain: podZone} - class := newStorageClass(test, namespace, suffix) - addSingleCSIZoneAllowedTopologyToStorageClass(cs, class, pvZone) - claim := newClaim(test, namespace, suffix) - claim.Spec.StorageClassName = &class.Name + test.Client = cs + test.Class = newStorageClass(test, namespace, suffix) + addSingleCSIZoneAllowedTopologyToStorageClass(cs, test.Class, pvZone) + test.Claim = newClaim(test, namespace, suffix) + test.Claim.Spec.StorageClassName = &test.Class.Name if delayBinding { - testsuites.TestBindingWaitForFirstConsumer(test, cs, claim, class, nodeSelector, true /* expect unschedulable */) + test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */) } else { test.PvCheck = func(claim 
*v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { // Ensure that a pod cannot be scheduled in an unsuitable zone. @@ -461,13 +402,12 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela defer testsuites.StopPod(cs, pod) framework.ExpectNoError(framework.WaitForPodNameUnschedulableInNamespace(cs, pod.Name, pod.Namespace), "pod should be unschedulable") } - testsuites.TestDynamicProvisioning(test, cs, claim, class) + test.TestDynamicProvisioning() } } -func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) error { +func waitForCSIDriver(csics csiclient.Interface, driverName string) error { timeout := 2 * time.Minute - driverName := testsuites.GetUniqueDriverName(driver) framework.Logf("waiting up to %v for CSIDriver %q", timeout, driverName) for start := time.Now(); time.Since(start) < timeout; time.Sleep(framework.Poll) { @@ -479,8 +419,7 @@ func waitForCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) e return fmt.Errorf("gave up after waiting %v for CSIDriver %q.", timeout, driverName) } -func destroyCSIDriver(csics csiclient.Interface, driver testsuites.TestDriver) { - driverName := testsuites.GetUniqueDriverName(driver) +func destroyCSIDriver(csics csiclient.Interface, driverName string) { driverGet, err := csics.CsiV1alpha1().CSIDrivers().Get(driverName, metav1.GetOptions{}) if err == nil { framework.Logf("deleting %s.%s: %s", driverGet.TypeMeta.APIVersion, driverGet.TypeMeta.Kind, driverGet.ObjectMeta.Name) diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 9d384c2e4fd..5afdd770db4 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -56,12 +56,11 @@ const ( // hostpathCSI type hostpathCSIDriver struct { - cleanup func() driverInfo testsuites.DriverInfo manifests []string } -func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver { +func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver { return &hostpathCSIDriver{ driverInfo: testsuites.DriverInfo{ Name: name, @@ -71,7 +70,6 @@ func initHostPathCSIDriver(name string, config testsuites.TestConfig, capabiliti "", // Default fsType ), Capabilities: capabilities, - Config: config, }, manifests: manifests, } @@ -82,8 +80,8 @@ var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{} var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{} // InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface -func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { - return initHostPathCSIDriver("csi-hostpath", config, +func InitHostPathCSIDriver() testsuites.TestDriver { + return initHostPathCSIDriver("csi-hostpath", map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true}, "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml", "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml", @@ -104,19 +102,19 @@ func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo { func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { - provisioner := testsuites.GetUniqueDriverName(h) +func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config 
*testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { + provisioner := config.GetUniqueDriverName() parameters := map[string]string{} - ns := h.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", provisioner) return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) } -func (h *hostpathCSIDriver) GetSnapshotClass() *unstructured.Unstructured { - snapshotter := testsuites.GetUniqueDriverName(h) +func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured { + snapshotter := config.GetUniqueDriverName() parameters := map[string]string{} - ns := h.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-vsc", snapshotter) return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix) @@ -126,57 +124,60 @@ func (h *hostpathCSIDriver) GetClaimSize() string { return "5Gi" } -func (h *hostpathCSIDriver) CreateDriver() { +func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name)) - f := h.driverInfo.Config.Framework + cancelLogging := testsuites.StartPodLogs(f) cs := f.ClientSet // The hostpath CSI driver only works when everything runs on the same node. nodes := framework.GetReadySchedulableNodesOrDie(cs) nodeName := nodes.Items[rand.Intn(len(nodes.Items))].Name - h.driverInfo.Config.ClientNodeName = nodeName + config := &testsuites.PerTestConfig{ + Driver: h, + Prefix: "hostpath", + Framework: f, + ClientNodeName: nodeName, + } // TODO (?): the storage.csi.image.version and storage.csi.image.registry // settings are ignored for this test. We could patch the image definitions. o := utils.PatchCSIOptions{ OldDriverName: h.driverInfo.Name, - NewDriverName: testsuites.GetUniqueDriverName(h), + NewDriverName: config.GetUniqueDriverName(), DriverContainerName: "hostpath", - DriverContainerArguments: []string{"--drivername=csi-hostpath-" + f.UniqueName}, + DriverContainerArguments: []string{"--drivername=" + config.GetUniqueDriverName()}, ProvisionerContainerName: "csi-provisioner", SnapshotterContainerName: "csi-snapshotter", NodeName: nodeName, } - cleanup, err := h.driverInfo.Config.Framework.CreateFromManifests(func(item interface{}) error { - return utils.PatchCSIDeployment(h.driverInfo.Config.Framework, o, item) + cleanup, err := config.Framework.CreateFromManifests(func(item interface{}) error { + return utils.PatchCSIDeployment(config.Framework, o, item) }, h.manifests...) 
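Note the shape PrepareTest takes here and in the mock and gce-pd drivers below: each setup step (StartPodLogs, CreateFromManifests) yields its own teardown, and the function returns a single closure that runs them in the right order. A generic sketch of that idea (composeCleanup is illustrative, not part of the patch):

```go
package sketch

// composeCleanup folds per-step teardowns into the single cleanup func
// that PrepareTest returns. Running them in reverse order mirrors the
// drivers here, which uninstall the deployment before stopping the pod
// log capture, so that teardown output is still collected.
func composeCleanup(teardowns ...func()) func() {
	return func() {
		for i := len(teardowns) - 1; i >= 0; i-- {
			teardowns[i]()
		}
	}
}
```

With such a helper the return below would read `return config, composeCleanup(cancelLogging, cleanup)`; the drivers in this patch simply inline the two calls.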
- h.cleanup = cleanup if err != nil { framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err) } -} -func (h *hostpathCSIDriver) CleanupDriver() { - if h.cleanup != nil { + return config, func() { By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name)) - h.cleanup() + cleanup() + cancelLogging() } } // mockCSI type mockCSIDriver struct { - cleanup func() driverInfo testsuites.DriverInfo manifests []string podInfoVersion *string + attachable bool } var _ testsuites.TestDriver = &mockCSIDriver{} var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{} // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface -func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver { +func InitMockCSIDriver(registerDriver, driverAttachable bool, podInfoVersion *string) testsuites.TestDriver { driverManifests := []string{ "test/e2e/testing-manifests/storage-csi/cluster-driver-registrar/rbac.yaml", "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml", @@ -187,16 +188,12 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml", } - config.ServerConfig = &framework.VolumeTestConfig{} - if registerDriver { driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-cluster-driver-registrar.yaml") } if driverAttachable { driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml") - } else { - config.ServerConfig.ServerArgs = append(config.ServerConfig.ServerArgs, "--disable-attach") } return &mockCSIDriver{ @@ -212,10 +209,10 @@ func InitMockCSIDriver(config testsuites.TestConfig, registerDriver, driverAttac testsuites.CapFsGroup: false, testsuites.CapExec: false, }, - Config: config, }, manifests: driverManifests, podInfoVersion: podInfoVersion, + attachable: driverAttachable, } } @@ -226,10 +223,10 @@ func (m *mockCSIDriver) GetDriverInfo() *testsuites.DriverInfo { func (m *mockCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { } -func (m *mockCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { - provisioner := testsuites.GetUniqueDriverName(m) +func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { + provisioner := config.GetUniqueDriverName() parameters := map[string]string{} - ns := m.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", provisioner) return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) @@ -239,20 +236,24 @@ func (m *mockCSIDriver) GetClaimSize() string { return "5Gi" } -func (m *mockCSIDriver) CreateDriver() { +func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { By("deploying csi mock driver") - f := m.driverInfo.Config.Framework + cancelLogging := testsuites.StartPodLogs(f) cs := f.ClientSet // pods should be scheduled on the node nodes := framework.GetReadySchedulableNodesOrDie(cs) node := nodes.Items[rand.Intn(len(nodes.Items))] - m.driverInfo.Config.ClientNodeName = node.Name + config := &testsuites.PerTestConfig{ + Driver: m, + Prefix: "mock", + Framework: f, + ClientNodeName: node.Name, + } containerArgs := []string{"--name=csi-mock-" + f.UniqueName} - - if m.driverInfo.Config.ServerConfig != nil && 
m.driverInfo.Config.ServerConfig.ServerArgs != nil { - containerArgs = append(containerArgs, m.driverInfo.Config.ServerConfig.ServerArgs...) + if !m.attachable { + containerArgs = append(containerArgs, "--disable-attach") } // TODO (?): the storage.csi.image.version and storage.csi.image.registry @@ -264,29 +265,27 @@ func (m *mockCSIDriver) CreateDriver() { DriverContainerArguments: containerArgs, ProvisionerContainerName: "csi-provisioner", ClusterRegistrarContainerName: "csi-cluster-driver-registrar", - NodeName: m.driverInfo.Config.ClientNodeName, + NodeName: config.ClientNodeName, PodInfoVersion: m.podInfoVersion, } cleanup, err := f.CreateFromManifests(func(item interface{}) error { return utils.PatchCSIDeployment(f, o, item) }, m.manifests...) - m.cleanup = cleanup if err != nil { framework.Failf("deploying csi mock driver: %v", err) } -} -func (m *mockCSIDriver) CleanupDriver() { - if m.cleanup != nil { + return config, func() { By("uninstalling csi mock driver") - m.cleanup() + cleanup() + cancelLogging() } } // InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests. -func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver { - return initHostPathCSIDriver("csi-hostpath-v0", config, +func InitHostPathV0CSIDriver() testsuites.TestDriver { + return initHostPathCSIDriver("csi-hostpath-v0", map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true}, "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml", "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml", @@ -300,16 +299,17 @@ func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver // gce-pd type gcePDCSIDriver struct { - cleanup func() - driverInfo testsuites.DriverInfo + topologyEnabled bool + driverInfo testsuites.DriverInfo } var _ testsuites.TestDriver = &gcePDCSIDriver{} var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{} // InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface -func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitGcePDCSIDriver(topologyEnabled bool) testsuites.TestDriver { return &gcePDCSIDriver{ + topologyEnabled: topologyEnabled, driverInfo: testsuites.DriverInfo{ Name: GCEPDCSIProvisionerName, FeatureTag: "[Serial]", @@ -327,8 +327,6 @@ func InitGcePDCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapExec: true, testsuites.CapMultiPODs: true, }, - - Config: config, }, } } @@ -338,21 +336,14 @@ func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo { } func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { - f := g.driverInfo.Config.Framework framework.SkipUnlessProviderIs("gce", "gke") - if !g.driverInfo.Config.TopologyEnabled { - // Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be - // scheduled in a different zone from the provisioned volume, causing basic provisioning - // tests to fail. 
- framework.SkipIfMultizone(f.ClientSet) - } if pattern.FsType == "xfs" { framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom") } } -func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { - ns := g.driverInfo.Config.Framework.Namespace.Name +func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { + ns := config.Framework.Namespace.Name provisioner := g.driverInfo.Name suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name) @@ -368,8 +359,16 @@ func (g *gcePDCSIDriver) GetClaimSize() string { return "5Gi" } -func (g *gcePDCSIDriver) CreateDriver() { +func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + if !g.topologyEnabled { + // Topology is disabled in external-provisioner, so in a multizone cluster, a pod could be + // scheduled in a different zone from the provisioned volume, causing basic provisioning + // tests to fail. + framework.SkipIfMultizone(f.ClientSet) + } + By("deploying csi gce-pd driver") + cancelLogging := testsuites.StartPodLogs(f) // It would be safer to rename the gcePD driver, but that // hasn't been done before either and attempts to do so now led to // errors during driver registration, therefore it is disabled @@ -382,7 +381,7 @@ func (g *gcePDCSIDriver) CreateDriver() { // DriverContainerName: "gce-driver", // ProvisionerContainerName: "csi-external-provisioner", // } - createGCESecrets(g.driverInfo.Config.Framework.ClientSet, g.driverInfo.Config.Framework.Namespace.Name) + createGCESecrets(f.ClientSet, f.Namespace.Name) manifests := []string{ "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml", @@ -392,23 +391,25 @@ func (g *gcePDCSIDriver) CreateDriver() { "test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", } - if g.driverInfo.Config.TopologyEnabled { + if g.topologyEnabled { manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss_alpha.yaml") } else { manifests = append(manifests, "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml") } - cleanup, err := g.driverInfo.Config.Framework.CreateFromManifests(nil, manifests...) - g.cleanup = cleanup + cleanup, err := f.CreateFromManifests(nil, manifests...) 
if err != nil { framework.Failf("deploying csi gce-pd driver: %v", err) } -} -func (g *gcePDCSIDriver) CleanupDriver() { - By("uninstalling gce-pd driver") - if g.cleanup != nil { - g.cleanup() - } + return &testsuites.PerTestConfig{ + Driver: g, + Prefix: "gcepd", + Framework: f, + }, func() { + By("uninstalling gce-pd driver") + cleanup() + cancelLogging() + } } // gcePd-external @@ -420,7 +421,7 @@ var _ testsuites.TestDriver = &gcePDExternalCSIDriver{} var _ testsuites.DynamicPVTestDriver = &gcePDExternalCSIDriver{} // InitGcePDExternalCSIDriver returns gcePDExternalCSIDriver that implements TestDriver interface -func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitGcePDExternalCSIDriver() testsuites.TestDriver { return &gcePDExternalCSIDriver{ driverInfo: testsuites.DriverInfo{ Name: GCEPDCSIProvisionerName, @@ -440,8 +441,6 @@ func InitGcePDExternalCSIDriver(config testsuites.TestConfig) testsuites.TestDri testsuites.CapExec: true, testsuites.CapMultiPODs: true, }, - - Config: config, }, } } @@ -452,14 +451,13 @@ func (g *gcePDExternalCSIDriver) GetDriverInfo() *testsuites.DriverInfo { func (g *gcePDExternalCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { framework.SkipUnlessProviderIs("gce", "gke") - framework.SkipIfMultizone(g.driverInfo.Config.Framework.ClientSet) if pattern.FsType == "xfs" { framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom") } } -func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { - ns := g.driverInfo.Config.Framework.Namespace.Name +func (g *gcePDExternalCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { + ns := config.Framework.Namespace.Name provisioner := g.driverInfo.Name suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name) @@ -475,8 +473,12 @@ func (g *gcePDExternalCSIDriver) GetClaimSize() string { return "5Gi" } -func (g *gcePDExternalCSIDriver) CreateDriver() { -} +func (g *gcePDExternalCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + framework.SkipIfMultizone(f.ClientSet) -func (g *gcePDExternalCSIDriver) CleanupDriver() { + return &testsuites.PerTestConfig{ + Driver: g, + Prefix: "gcepdext", + Framework: f, + }, func() {} } diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 484e1384d16..66e2d3fe829 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -82,7 +82,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &nfsDriver{} var _ testsuites.DynamicPVTestDriver = &nfsDriver{} // InitNFSDriver returns nfsDriver that implements TestDriver interface -func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitNFSDriver() testsuites.TestDriver { return &nfsDriver{ driverInfo: testsuites.DriverInfo{ Name: "nfs", @@ -96,8 +96,6 @@ func InitNFSDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapPersistence: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -133,10 +131,10 @@ func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu }, nil } -func (n *nfsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { +func (n *nfsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := n.externalPluginName parameters := map[string]string{"mountOptions": "vers=4.1"} - ns := 
n.driverInfo.Config.Framework.Namespace.Name
+	ns := config.Framework.Namespace.Name
 	suffix := fmt.Sprintf("%s-sc", n.driverInfo.Name)
 
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
@@ -146,8 +144,7 @@ func (n *nfsDriver) GetClaimSize() string {
 	return "5Gi"
 }
 
-func (n *nfsDriver) CreateDriver() {
-	f := n.driverInfo.Config.Framework
+func (n *nfsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	cs := f.ClientSet
 	ns := f.Namespace
 	n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name)
@@ -164,32 +161,32 @@
 
 	By("creating an external dynamic provisioner pod")
 	n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
+
+	return &testsuites.PerTestConfig{
+		Driver:    n,
+		Prefix:    "nfs",
+		Framework: f,
+	}, func() {
+		framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
+		clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
+		cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
+	}
 }
 
-func (n *nfsDriver) CleanupDriver() {
-	f := n.driverInfo.Config.Framework
-	cs := f.ClientSet
-	ns := f.Namespace
-
-	framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod))
-	clusterRoleBindingName := ns.Name + "--" + "cluster-admin"
-	cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0))
-}
-
-func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
-	f := n.driverInfo.Config.Framework
+func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
+	f := config.Framework
 	cs := f.ClientSet
 	ns := f.Namespace
 
 	// NewNFSServer creates a pod for InlineVolume and PreprovisionedPV,
 	// and startExternalProvisioner creates a pod for DynamicPV.
-	// Therefore, we need a different CreateDriver logic for volType.
+	// Therefore, we need a different PrepareTest logic for volType.
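The switch that follows returns an nfsVolume for the server-backed volume types and nil for DynamicPV. Under the TestVolume interface this series introduces, cleanup belongs to the returned volume rather than to the driver, so callers pair the two roughly like this (sketch; the withVolume helper is illustrative, the interface shape is inferred from this patch):

```go
import (
	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// Sketch of the caller side of the CreateVolume/TestVolume contract.
// CreateVolume may return nil (e.g. DynamicPV needs no server pod),
// hence the nil check before deferring DeleteVolume.
func withVolume(driver testsuites.PreprovisionedVolumeTestDriver, config *testsuites.PerTestConfig) {
	volume := driver.CreateVolume(config, testpatterns.PreprovisionedPV)
	if volume != nil {
		defer volume.DeleteVolume()
	}
	// ... exercise the volume here ...
}
```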
switch volType { case testpatterns.InlineVolume: fallthrough case testpatterns.PreprovisionedPV: - config, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{}) - n.driverInfo.Config.ServerConfig = &config + c, serverPod, serverIP := framework.NewNFSServer(cs, ns.Name, []string{}) + config.ServerConfig = &c return &nfsVolume{ serverIP: serverIP, serverPod: serverPod, @@ -224,7 +221,7 @@ var _ testsuites.InlineVolumeTestDriver = &glusterFSDriver{} var _ testsuites.PreprovisionedPVTestDriver = &glusterFSDriver{} // InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface -func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitGlusterFSDriver() testsuites.TestDriver { return &glusterFSDriver{ driverInfo: testsuites.DriverInfo{ Name: "gluster", @@ -236,8 +233,6 @@ func InitGlusterFSDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapPersistence: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -280,19 +275,21 @@ func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string }, nil } -func (g *glusterFSDriver) CreateDriver() { +func (g *glusterFSDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: g, + Prefix: "gluster", + Framework: f, + }, func() {} } -func (g *glusterFSDriver) CleanupDriver() { -} - -func (g *glusterFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := g.driverInfo.Config.Framework +func (g *glusterFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name) - g.driverInfo.Config.ServerConfig = &config + c, serverPod, _ := framework.NewGlusterfsServer(cs, ns.Name) + config.ServerConfig = &c return &glusterVolume{ prefix: config.Prefix, serverPod: serverPod, @@ -339,7 +336,7 @@ var _ testsuites.InlineVolumeTestDriver = &iSCSIDriver{} var _ testsuites.PreprovisionedPVTestDriver = &iSCSIDriver{} // InitISCSIDriver returns iSCSIDriver that implements TestDriver interface -func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitISCSIDriver() testsuites.TestDriver { return &iSCSIDriver{ driverInfo: testsuites.DriverInfo{ Name: "iscsi", @@ -358,8 +355,6 @@ func InitISCSIDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapBlock: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -408,19 +403,21 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo return &pvSource, nil } -func (i *iSCSIDriver) CreateDriver() { +func (i *iSCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: i, + Prefix: "iscsi", + Framework: f, + }, func() {} } -func (i *iSCSIDriver) CleanupDriver() { -} - -func (i *iSCSIDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := i.driverInfo.Config.Framework +func (i *iSCSIDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name) - i.driverInfo.Config.ServerConfig = &config + c, serverPod, serverIP := framework.NewISCSIServer(cs, ns.Name) + config.ServerConfig = &c return 
&iSCSIVolume{ serverPod: serverPod, serverIP: serverIP, @@ -450,7 +447,7 @@ var _ testsuites.InlineVolumeTestDriver = &rbdDriver{} var _ testsuites.PreprovisionedPVTestDriver = &rbdDriver{} // InitRbdDriver returns rbdDriver that implements TestDriver interface -func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitRbdDriver() testsuites.TestDriver { return &rbdDriver{ driverInfo: testsuites.DriverInfo{ Name: "rbd", @@ -469,8 +466,6 @@ func InitRbdDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapBlock: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -505,12 +500,12 @@ func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string, volume testsui } func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - f := r.driverInfo.Config.Framework - ns := f.Namespace - rv, ok := volume.(*rbdVolume) Expect(ok).To(BeTrue(), "Failed to cast test volume to RBD test volume") + f := rv.f + ns := f.Namespace + pvSource := v1.PersistentVolumeSource{ RBD: &v1.RBDPersistentVolumeSource{ CephMonitors: []string{rv.serverIP}, @@ -530,19 +525,21 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu return &pvSource, nil } -func (r *rbdDriver) CreateDriver() { +func (r *rbdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: r, + Prefix: "rbd", + Framework: f, + }, func() {} } -func (r *rbdDriver) CleanupDriver() { -} - -func (r *rbdDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := r.driverInfo.Config.Framework +func (r *rbdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) - r.driverInfo.Config.ServerConfig = &config + c, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) + config.ServerConfig = &c return &rbdVolume{ serverPod: serverPod, serverIP: serverIP, @@ -577,7 +574,7 @@ var _ testsuites.InlineVolumeTestDriver = &cephFSDriver{} var _ testsuites.PreprovisionedPVTestDriver = &cephFSDriver{} // InitCephFSDriver returns cephFSDriver that implements TestDriver interface -func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitCephFSDriver() testsuites.TestDriver { return &cephFSDriver{ driverInfo: testsuites.DriverInfo{ Name: "ceph", @@ -590,8 +587,6 @@ func InitCephFSDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapPersistence: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -620,12 +615,11 @@ func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string, volume test } func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volume testsuites.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) { - f := c.driverInfo.Config.Framework - ns := f.Namespace - cv, ok := volume.(*cephVolume) Expect(ok).To(BeTrue(), "Failed to cast test volume to Ceph test volume") + ns := cv.f.Namespace + return &v1.PersistentVolumeSource{ CephFS: &v1.CephFSPersistentVolumeSource{ Monitors: []string{cv.serverIP + ":6789"}, @@ -639,19 +633,21 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, v }, nil } -func (c *cephFSDriver) CreateDriver() { +func (c *cephFSDriver) 
PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: c, + Prefix: "cephfs", + Framework: f, + }, func() {} } -func (c *cephFSDriver) CleanupDriver() { -} - -func (c *cephFSDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := c.driverInfo.Config.Framework +func (c *cephFSDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet ns := f.Namespace - config, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) - c.driverInfo.Config.ServerConfig = &config + cfg, serverPod, secret, serverIP := framework.NewRBDServer(cs, ns.Name) + config.ServerConfig = &cfg return &cephVolume{ serverPod: serverPod, serverIP: serverIP, @@ -676,7 +672,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathDriver{} var _ testsuites.InlineVolumeTestDriver = &hostPathDriver{} // InitHostPathDriver returns hostPathDriver that implements TestDriver interface -func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitHostPathDriver() testsuites.TestDriver { return &hostPathDriver{ driverInfo: testsuites.DriverInfo{ Name: "hostPath", @@ -687,8 +683,6 @@ func InitHostPathDriver(config testsuites.TestConfig) testsuites.TestDriver { Capabilities: map[testsuites.Capability]bool{ testsuites.CapPersistence: true, }, - - Config: config, }, } } @@ -712,20 +706,22 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, volume te } } -func (h *hostPathDriver) CreateDriver() { +func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: h, + Prefix: "hostpath", + Framework: f, + }, func() {} } -func (h *hostPathDriver) CleanupDriver() { -} - -func (h *hostPathDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := h.driverInfo.Config.Framework +func (h *hostPathDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet // pods should be scheduled on the node nodes := framework.GetReadySchedulableNodesOrDie(cs) node := nodes.Items[rand.Intn(len(nodes.Items))] - h.driverInfo.Config.ClientNodeName = node.Name + config.ClientNodeName = node.Name return nil } @@ -748,7 +744,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &hostPathSymlinkDriver{} var _ testsuites.InlineVolumeTestDriver = &hostPathSymlinkDriver{} // InitHostPathSymlinkDriver returns hostPathSymlinkDriver that implements TestDriver interface -func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitHostPathSymlinkDriver() testsuites.TestDriver { return &hostPathSymlinkDriver{ driverInfo: testsuites.DriverInfo{ Name: "hostPathSymlink", @@ -759,8 +755,6 @@ func InitHostPathSymlinkDriver(config testsuites.TestConfig) testsuites.TestDriv Capabilities: map[testsuites.Capability]bool{ testsuites.CapPersistence: true, }, - - Config: config, }, } } @@ -787,14 +781,16 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, vo } } -func (h *hostPathSymlinkDriver) CreateDriver() { +func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: h, + Prefix: "hostpathsymlink", + Framework: f, + }, func() {} } -func (h *hostPathSymlinkDriver) CleanupDriver() { 
-} - -func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := h.driverInfo.Config.Framework +func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework cs := f.ClientSet sourcePath := fmt.Sprintf("/tmp/%v", f.Namespace.Name) @@ -804,7 +800,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) t // pods should be scheduled on the node nodes := framework.GetReadySchedulableNodesOrDie(cs) node := nodes.Items[rand.Intn(len(nodes.Items))] - h.driverInfo.Config.ClientNodeName = node.Name + config.ClientNodeName = node.Name cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath) privileged := true @@ -888,7 +884,7 @@ var _ testsuites.PreprovisionedVolumeTestDriver = &emptydirDriver{} var _ testsuites.InlineVolumeTestDriver = &emptydirDriver{} // InitEmptydirDriver returns emptydirDriver that implements TestDriver interface -func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitEmptydirDriver() testsuites.TestDriver { return &emptydirDriver{ driverInfo: testsuites.DriverInfo{ Name: "emptydir", @@ -899,8 +895,6 @@ func InitEmptydirDriver(config testsuites.TestConfig) testsuites.TestDriver { Capabilities: map[testsuites.Capability]bool{ testsuites.CapExec: true, }, - - Config: config, }, } } @@ -922,14 +916,16 @@ func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string, volume te } } -func (e *emptydirDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { +func (e *emptydirDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { return nil } -func (e *emptydirDriver) CreateDriver() { -} - -func (e *emptydirDriver) CleanupDriver() { +func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: e, + Prefix: "emptydir", + Framework: f, + }, func() {} } // Cinder @@ -953,7 +949,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &cinderDriver{} var _ testsuites.DynamicPVTestDriver = &cinderDriver{} // InitCinderDriver returns cinderDriver that implements TestDriver interface -func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitCinderDriver() testsuites.TestDriver { return &cinderDriver{ driverInfo: testsuites.DriverInfo{ Name: "cinder", @@ -967,8 +963,6 @@ func InitCinderDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapFsGroup: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -1013,13 +1007,13 @@ func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string, v return &pvSource, nil } -func (c *cinderDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { +func (c *cinderDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/cinder" parameters := map[string]string{} if fsType != "" { parameters["fsType"] = fsType } - ns := c.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name) return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) @@ -1029,14 +1023,16 @@ func (c *cinderDriver) GetClaimSize() string { return "5Gi" } -func (c *cinderDriver) CreateDriver() { +func (c *cinderDriver) 
PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: c, + Prefix: "cinder", + Framework: f, + }, func() {} } -func (c *cinderDriver) CleanupDriver() { -} - -func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := c.driverInfo.Config.Framework +func (c *cinderDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework ns := f.Namespace // We assume that namespace.Name is a random string @@ -1109,7 +1105,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &gcePdDriver{} var _ testsuites.DynamicPVTestDriver = &gcePdDriver{} // InitGceDriver returns gcePdDriver that implements TestDriver interface -func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitGcePdDriver() testsuites.TestDriver { return &gcePdDriver{ driverInfo: testsuites.DriverInfo{ Name: "gcepd", @@ -1128,8 +1124,6 @@ func InitGcePdDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapBlock: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -1172,13 +1166,13 @@ func (g *gcePdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo return &pvSource, nil } -func (g *gcePdDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { +func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/gce-pd" parameters := map[string]string{} if fsType != "" { parameters["fsType"] = fsType } - ns := g.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name) return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) @@ -1188,17 +1182,19 @@ func (h *gcePdDriver) GetClaimSize() string { return "5Gi" } -func (g *gcePdDriver) CreateDriver() { +func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: g, + Prefix: "gcepd", + Framework: f, + }, func() {} } -func (g *gcePdDriver) CleanupDriver() { -} - -func (g *gcePdDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { +func (g *gcePdDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { if volType == testpatterns.InlineVolume { // PD will be created in framework.TestContext.CloudConfig.Zone zone, // so pods should be also scheduled there. 
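The selector swap just below routes that scheduling constraint through the new per-test config instead of the shared driver config. For orientation, its effect on the pods the framework creates is equivalent to this sketch (pinZone is illustrative; the label constant comes from k8s.io/api/core/v1):

```go
// pinZone shows what config.ClientNodeSelector amounts to once the
// framework builds a test pod; it is not part of the patch.
func pinZone(pod *v1.Pod, zone string) {
	if pod.Spec.NodeSelector == nil {
		pod.Spec.NodeSelector = map[string]string{}
	}
	// e.g. zone = framework.TestContext.CloudConfig.Zone
	pod.Spec.NodeSelector[v1.LabelZoneFailureDomain] = zone
}
```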
- g.driverInfo.Config.ClientNodeSelector = map[string]string{ + config.ClientNodeSelector = map[string]string{ v1.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone, } } @@ -1231,7 +1227,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &vSphereDriver{} var _ testsuites.DynamicPVTestDriver = &vSphereDriver{} // InitVSphereDriver returns vSphereDriver that implements TestDriver interface -func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitVSphereDriver() testsuites.TestDriver { return &vSphereDriver{ driverInfo: testsuites.DriverInfo{ Name: "vSphere", @@ -1245,8 +1241,6 @@ func InitVSphereDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapFsGroup: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -1298,13 +1292,13 @@ func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string, return &pvSource, nil } -func (v *vSphereDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { +func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/vsphere-volume" parameters := map[string]string{} if fsType != "" { parameters["fsType"] = fsType } - ns := v.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name) return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) @@ -1314,14 +1308,16 @@ func (v *vSphereDriver) GetClaimSize() string { return "5Gi" } -func (v *vSphereDriver) CreateDriver() { +func (v *vSphereDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: v, + Prefix: "vsphere", + Framework: f, + }, func() {} } -func (v *vSphereDriver) CleanupDriver() { -} - -func (v *vSphereDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { - f := v.driverInfo.Config.Framework +func (v *vSphereDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { + f := config.Framework vspheretest.Bootstrap(f) nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo() volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef) @@ -1352,7 +1348,7 @@ var _ testsuites.PreprovisionedPVTestDriver = &azureDriver{} var _ testsuites.DynamicPVTestDriver = &azureDriver{} // InitAzureDriver returns azureDriver that implements TestDriver interface -func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitAzureDriver() testsuites.TestDriver { return &azureDriver{ driverInfo: testsuites.DriverInfo{ Name: "azure", @@ -1367,8 +1363,6 @@ func InitAzureDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapBlock: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -1419,13 +1413,13 @@ func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string, vo return &pvSource, nil } -func (a *azureDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { +func (a *azureDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/azure-disk" parameters := map[string]string{} if fsType != "" { parameters["fsType"] = fsType } - ns := a.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", 
a.driverInfo.Name) return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) @@ -1435,13 +1429,15 @@ func (a *azureDriver) GetClaimSize() string { return "5Gi" } -func (a *azureDriver) CreateDriver() { +func (a *azureDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: a, + Prefix: "azure", + Framework: f, + }, func() {} } -func (a *azureDriver) CleanupDriver() { -} - -func (a *azureDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { +func (a *azureDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { By("creating a test azure disk volume") volumeName, err := framework.CreatePDWithRetry() Expect(err).NotTo(HaveOccurred()) @@ -1470,7 +1466,7 @@ var _ testsuites.TestDriver = &awsDriver{} var _ testsuites.DynamicPVTestDriver = &awsDriver{} // InitAwsDriver returns awsDriver that implements TestDriver interface -func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver { +func InitAwsDriver() testsuites.TestDriver { return &awsDriver{ driverInfo: testsuites.DriverInfo{ Name: "aws", @@ -1486,8 +1482,6 @@ func InitAwsDriver(config testsuites.TestConfig) testsuites.TestDriver { testsuites.CapBlock: true, testsuites.CapExec: true, }, - - Config: config, }, } } @@ -1529,13 +1523,13 @@ func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string, volu } */ -func (a *awsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { +func (a *awsDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass { provisioner := "kubernetes.io/aws-ebs" parameters := map[string]string{} if fsType != "" { parameters["fsType"] = fsType } - ns := a.driverInfo.Config.Framework.Namespace.Name + ns := config.Framework.Namespace.Name suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix) @@ -1545,15 +1539,17 @@ func (a *awsDriver) GetClaimSize() string { return "5Gi" } -func (a *awsDriver) CreateDriver() { -} - -func (a *awsDriver) CleanupDriver() { +func (a *awsDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) { + return &testsuites.PerTestConfig{ + Driver: a, + Prefix: "aws", + Framework: f, + }, func() {} } // TODO: Fix authorization error in attach operation and uncomment below /* -func (a *awsDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume { +func (a *awsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume { By("creating a test aws volume") var err error a.volumeName, err = framework.CreatePDWithRetry() @@ -1617,7 +1613,7 @@ var _ testsuites.TestDriver = &localDriver{} var _ testsuites.PreprovisionedVolumeTestDriver = &localDriver{} var _ testsuites.PreprovisionedPVTestDriver = &localDriver{} -func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config testsuites.TestConfig) testsuites.TestDriver { +func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func() testsuites.TestDriver { maxFileSize := defaultLocalVolumeMaxFileSize if maxFileSizeByVolType, ok := localVolumeMaxFileSizes[volumeType]; ok { maxFileSize = maxFileSizeByVolType @@ -1630,8 +1626,7 @@ func InitLocalDriverWithVolumeType(volumeType utils.LocalVolumeType) func(config if capabilitiesByType, ok := localVolumeCapabitilies[volumeType]; ok { capabilities = 
capabilitiesByType
 	}
-	return func(config testsuites.TestConfig) testsuites.TestDriver {
-		hostExec := utils.NewHostExec(config.Framework)
+	return func() testsuites.TestDriver {
 		// custom tag to distinguish from tests of other volume types
 		featureTag := fmt.Sprintf("[LocalVolumeType: %s]", volumeType)
 		// For GCE Local SSD volumes, we must run serially
@@ -1645,11 +1640,8 @@
 				MaxFileSize:     maxFileSize,
 				SupportedFsType: supportedFsTypes,
 				Capabilities:    capabilities,
-				Config:          config,
 			},
-			hostExec:   hostExec,
 			volumeType: volumeType,
-			ltrMgr:     utils.NewLocalResourceManager("local-driver", hostExec, "/tmp"),
 		}
 	}
 }
@@ -1673,28 +1665,30 @@ func (l *localDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
 	}
 }
 
-func (l *localDriver) CreateDriver() {
+func (l *localDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	// choose a random node to test against
-	l.node = l.randomNode()
-}
-
-func (l *localDriver) CleanupDriver() {
-	l.hostExec.Cleanup()
-}
-
-func (l *localDriver) randomNode() *v1.Node {
-	f := l.driverInfo.Config.Framework
 	nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-	node := nodes.Items[rand.Intn(len(nodes.Items))]
-	return &node
+	l.node = &nodes.Items[rand.Intn(len(nodes.Items))]
+
+	l.hostExec = utils.NewHostExec(f)
+	l.ltrMgr = utils.NewLocalResourceManager("local-driver", l.hostExec, "/tmp")
+
+	return &testsuites.PerTestConfig{
+		Driver:         l,
+		Prefix:         "local",
+		Framework:      f,
+		ClientNodeName: l.node.Name,
+	}, func() {
+		l.hostExec.Cleanup()
+	}
 }
 
-func (l *localDriver) CreateVolume(volType testpatterns.TestVolType) testsuites.TestVolume {
+func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType testpatterns.TestVolType) testsuites.TestVolume {
 	switch volType {
 	case testpatterns.PreprovisionedPV:
 		node := l.node
 		// assign this to schedule pod on this node
-		l.driverInfo.Config.ClientNodeName = node.Name
+		config.ClientNodeName = node.Name
 		return &localVolume{
 			ltrMgr: l.ltrMgr,
 			ltr:    l.ltrMgr.Create(node, l.volumeType, nil),
diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go
index f1b33791430..f9b7c97901d 100644
--- a/test/e2e/storage/in_tree_volumes.go
+++ b/test/e2e/storage/in_tree_volumes.go
@@ -18,7 +18,6 @@ package storage
 
 import (
 	. "github.com/onsi/ginkgo"
-	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -26,7 +25,7 @@ import (
 )
 
 // List of testDrivers to be executed in the loop below
-var testDrivers = []func(config testsuites.TestConfig) testsuites.TestDriver{
+var testDrivers = []func() testsuites.TestDriver{
 	drivers.InitNFSDriver,
 	drivers.InitGlusterFSDriver,
 	drivers.InitISCSIDriver,
@@ -65,35 +64,11 @@ func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestP
 
 // This executes testSuites for in-tree volumes.
 var _ = utils.SIGDescribe("In-tree Volumes", func() {
-	f := framework.NewDefaultFramework("volumes")
-
-	var (
-		// Common configuration options for all drivers.
-		config = testsuites.TestConfig{
-			Framework: f,
-			Prefix:    "in-tree",
-		}
-	)
-
 	for _, initDriver := range testDrivers {
-		curDriver := initDriver(config)
-		curConfig := curDriver.GetDriverInfo().Config
+		curDriver := initDriver()
+
 		Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-			BeforeEach(func() {
-				// Reset config.
The driver might have modified its copy - // in a previous test. - curDriver.GetDriverInfo().Config = curConfig - - // setupDriver - curDriver.CreateDriver() - }) - - AfterEach(func() { - // Cleanup driver - curDriver.CleanupDriver() - }) - - testsuites.RunTestSuite(f, curDriver, testSuites, intreeTunePattern) + testsuites.DefineTestSuite(curDriver, testSuites, intreeTunePattern) }) } }) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index b58a04568ce..5cf05f9816a 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -143,10 +143,11 @@ func testVolumeProvisioning(c clientset.Interface, ns string) { } for _, test := range tests { - class := newStorageClass(test, ns, "" /* suffix */) - claim := newClaim(test, ns, "" /* suffix */) - claim.Spec.StorageClassName = &class.Name - testsuites.TestDynamicProvisioning(test, c, claim, class) + test.Client = c + test.Class = newStorageClass(test, ns, "" /* suffix */) + test.Claim = newClaim(test, ns, "" /* suffix */) + test.Claim.Spec.StorageClassName = &test.Class.Name + test.TestDynamicProvisioning() } } @@ -301,6 +302,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) { test := testsuites.StorageClassTest{ + Client: c, Name: "Regional PD storage class with waitForFirstConsumer test on GCE", Provisioner: "kubernetes.io/gce-pd", Parameters: map[string]string{ @@ -312,14 +314,14 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) } suffix := "delayed-regional" - class := newStorageClass(test, ns, suffix) + test.Class = newStorageClass(test, ns, suffix) var claims []*v1.PersistentVolumeClaim for i := 0; i < pvcCount; i++ { claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name + claim.Spec.StorageClassName = &test.Class.Name claims = append(claims, claim) } - pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */) + pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } @@ -345,17 +347,20 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) { } suffix := "topo-regional" - class := newStorageClass(test, ns, suffix) + test.Client = c + test.Class = newStorageClass(test, ns, suffix) zones := getTwoRandomZones(c) - addAllowedTopologiesToStorageClass(c, class, zones) - claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name - pv := testsuites.TestDynamicProvisioning(test, c, claim, class) + addAllowedTopologiesToStorageClass(c, test.Class, zones) + test.Claim = newClaim(test, ns, suffix) + test.Claim.Spec.StorageClassName = &test.Class.Name + + pv := test.TestDynamicProvisioning() checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true) } func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) { test := testsuites.StorageClassTest{ + Client: c, Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE", Provisioner: "kubernetes.io/gce-pd", Parameters: map[string]string{ @@ -367,16 +372,16 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s } suffix := "topo-delayed-regional" - class := newStorageClass(test, ns, suffix) + test.Class = 
newStorageClass(test, ns, suffix)
 	topoZones := getTwoRandomZones(c)
-	addAllowedTopologiesToStorageClass(c, class, topoZones)
+	addAllowedTopologiesToStorageClass(c, test.Class, topoZones)
 	var claims []*v1.PersistentVolumeClaim
 	for i := 0; i < pvcCount; i++ {
 		claim := newClaim(test, ns, suffix)
-		claim.Spec.StorageClassName = &class.Name
+		claim.Spec.StorageClassName = &test.Class.Name
 		claims = append(claims, claim)
 	}
-	pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */)
+	pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
 	if node == nil {
 		framework.Failf("unexpected nil node found")
 	}
diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go
index e0c9e65f5be..96e2ff16674 100644
--- a/test/e2e/storage/testsuites/base.go
+++ b/test/e2e/storage/testsuites/base.go
@@ -17,7 +17,9 @@ limitations under the License.
 package testsuites
 
 import (
+	"context"
 	"fmt"
+	"regexp"
 	"time"
 
 	. "github.com/onsi/ginkgo"
@@ -32,6 +34,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/framework/podlogs"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 )
 
@@ -39,10 +42,10 @@ type TestSuite interface {
 	// getTestSuiteInfo returns the TestSuiteInfo for this TestSuite
 	getTestSuiteInfo() TestSuiteInfo
-	// skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver
-	skipUnsupportedTest(testpatterns.TestPattern, TestDriver)
-	// execTest executes test of the testpattern for the driver
-	execTest(TestDriver, testpatterns.TestPattern)
+	// defineTests defines tests of the testpattern for the driver.
+	// Called inside a Ginkgo context that reflects the current driver and test pattern,
+	// so the test suite can define tests directly with ginkgo.It.
+ defineTests(TestDriver, testpatterns.TestPattern) } // TestSuiteInfo represents a set of parameters for TestSuite @@ -54,11 +57,8 @@ type TestSuiteInfo struct { // TestResource represents an interface for resources that is used by TestSuite type TestResource interface { - // setupResource sets up test resources to be used for the tests with the - // combination of TestDriver and TestPattern - setupResource(TestDriver, testpatterns.TestPattern) - // cleanupResource clean up the test resources created in SetupResource - cleanupResource(TestDriver, testpatterns.TestPattern) + // cleanupResource cleans up the test resources created when setting up the resource + cleanupResource() } func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string { @@ -66,27 +66,36 @@ func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string { return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag) } -// RunTestSuite runs all testpatterns of all testSuites for a driver -func RunTestSuite(f *framework.Framework, driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) { +// DefineTestSuite defines tests for all testpatterns and all testSuites for a driver +func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) { for _, testSuiteInit := range tsInits { suite := testSuiteInit() patterns := tunePatternFunc(suite.getTestSuiteInfo().testPatterns) for _, pattern := range patterns { - suite.execTest(driver, pattern) + p := pattern + Context(getTestNameStr(suite, p), func() { + BeforeEach(func() { + // Skip unsupported tests to avoid unnecessary resource initialization + skipUnsupportedTest(driver, p) + }) + suite.defineTests(driver, p) + }) } } } -// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern +// skipUnsupportedTest will skip tests if the combination of driver, and testpattern // is not suitable to be tested. // Whether it needs to be skipped is checked by following steps: // 1. Check if Whether SnapshotType is supported by driver from its interface // 2. Check if Whether volType is supported by driver from its interface // 3. Check if fsType is supported // 4. Check with driver specific logic -// 5. Check with testSuite specific logic -func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpatterns.TestPattern) { +// +// Test suites can also skip tests inside their own defineTests function or in +// individual tests. +func skipUnsupportedTest(driver TestDriver, pattern testpatterns.TestPattern) { dInfo := driver.GetDriverInfo() var isSupported bool @@ -130,9 +139,6 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern // 4. Check with driver specific logic driver.SkipUnsupportedTest(pattern) - - // 5. Check with testSuite specific logic - suite.skipUnsupportedTest(pattern, driver) } // genericVolumeTestResource is a generic implementation of TestResource that wil be able to @@ -141,6 +147,8 @@ func skipUnsupportedTest(suite TestSuite, driver TestDriver, pattern testpattern // Also, see subpath.go in the same directory for how to extend and use it. 
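+//
+// A sketch of the intended create/cleanup pairing (both names are defined
+// in this file):
+//
+//	resource := createGenericVolumeTestResource(driver, config, pattern)
+//	defer resource.cleanupResource()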
type genericVolumeTestResource struct { driver TestDriver + config *PerTestConfig + pattern testpatterns.TestPattern volType string volSource *v1.VolumeSource pvc *v1.PersistentVolumeClaim @@ -152,17 +160,20 @@ type genericVolumeTestResource struct { var _ TestResource = &genericVolumeTestResource{} -// setupResource sets up genericVolumeTestResource -func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - r.driver = driver +func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern) *genericVolumeTestResource { + r := genericVolumeTestResource{ + driver: driver, + config: config, + pattern: pattern, + } dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework + f := config.Framework cs := f.ClientSet fsType := pattern.FsType volType := pattern.VolType // Create volume for pre-provisioned volume tests - r.volume = CreateVolume(driver, volType) + r.volume = CreateVolume(driver, config, volType) switch volType { case testpatterns.InlineVolume: @@ -184,7 +195,7 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes framework.Logf("Creating resource for dynamic PV") if dDriver, ok := driver.(DynamicPVTestDriver); ok { claimSize := dDriver.GetClaimSize() - r.sc = dDriver.GetDynamicProvisionStorageClass(fsType) + r.sc = dDriver.GetDynamicProvisionStorageClass(r.config, fsType) By("creating a StorageClass " + r.sc.Name) var err error @@ -204,13 +215,14 @@ func (r *genericVolumeTestResource) setupResource(driver TestDriver, pattern tes if r.volSource == nil { framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType) } + + return &r } // cleanupResource cleans up genericVolumeTestResource -func (r *genericVolumeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) { - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - volType := pattern.VolType +func (r *genericVolumeTestResource) cleanupResource() { + f := r.config.Framework + volType := r.pattern.VolType if r.pvc != nil || r.pv != nil { switch volType { @@ -356,7 +368,7 @@ func deleteStorageClass(cs clientset.Interface, className string) { // the testsuites package whereas framework.VolumeTestConfig is merely // an implementation detail. It contains fields that have no effect, // which makes it unsuitable for use in the testsuits public API. -func convertTestConfig(in *TestConfig) framework.VolumeTestConfig { +func convertTestConfig(in *PerTestConfig) framework.VolumeTestConfig { if in.ServerConfig != nil { return *in.ServerConfig } @@ -390,3 +402,42 @@ func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.U return snapshot } + +// StartPodLogs begins capturing log output and events from current +// and future pods running in the namespace of the framework. That +// ends when the returned cleanup function is called. +// +// The output goes to log files (when using --report-dir, as in the +// CI) or the output stream (otherwise). +func StartPodLogs(f *framework.Framework) func() { + ctx, cancel := context.WithCancel(context.Background()) + cs := f.ClientSet + ns := f.Namespace + + to := podlogs.LogOutput{ + StatusWriter: GinkgoWriter, + } + if framework.TestContext.ReportDir == "" { + to.LogWriter = GinkgoWriter + } else { + test := CurrentGinkgoTestDescription() + reg := regexp.MustCompile("[^a-zA-Z0-9_-]+") + // We end the prefix with a slash to ensure that all logs + // end up in a directory named after the current test. 
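+	// For example, with --report-dir=/tmp/results the logs of a test
+	// might land under a directory roughly like (hypothetical name)
+	// /tmp/results/Testpattern__Dynamic_PV_default_fs_provisioning_should_provision_storage_with_defaults/.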
+ // + // TODO: use a deeper directory hierarchy once gubernator + // supports that (https://github.com/kubernetes/test-infra/issues/10289). + to.LogPathPrefix = framework.TestContext.ReportDir + "/" + + reg.ReplaceAllString(test.FullTestText, "_") + "/" + } + podlogs.CopyAllLogs(ctx, cs, ns.Name, to) + + // pod events are something that the framework already collects itself + // after a failed test. Logging them live is only useful for interactive + // debugging, not when we collect reports. + if framework.TestContext.ReportDir == "" { + podlogs.WatchPods(ctx, cs, ns.Name, GinkgoWriter) + } + + return cancel +} diff --git a/test/e2e/storage/testsuites/driveroperations.go b/test/e2e/storage/testsuites/driveroperations.go index 45132bfa803..d17b3619bfc 100644 --- a/test/e2e/storage/testsuites/driveroperations.go +++ b/test/e2e/storage/testsuites/driveroperations.go @@ -37,13 +37,13 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string { } // CreateVolume creates volume for test unless dynamicPV test -func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) TestVolume { +func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume { switch volType { case testpatterns.InlineVolume: fallthrough case testpatterns.PreprovisionedPV: if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok { - return pDriver.CreateVolume(volType) + return pDriver.CreateVolume(config, volType) } case testpatterns.DynamicPV: // No need to create volume @@ -103,8 +103,3 @@ func GetSnapshotClass( return snapshotClass } - -// GetUniqueDriverName returns unique driver name that can be used parallelly in tests -func GetUniqueDriverName(driver TestDriver) string { - return fmt.Sprintf("%s-%s", driver.GetDriverInfo().Name, driver.GetDriverInfo().Config.Framework.UniqueName) -} diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 5478042385e..6153aab6603 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -41,6 +41,9 @@ import ( // StorageClassTest represents parameters to be used by provisioning tests. // Not all parameters are used by all tests. 
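+//
+// With Client, Claim and Class moved into the struct, callers now set the
+// fields and invoke the checks as methods; a sketch mirroring the
+// regional_pd.go changes above:
+//
+//	test.Client = c
+//	test.Class = newStorageClass(test, ns, "" /* suffix */)
+//	test.Claim = newClaim(test, ns, "" /* suffix */)
+//	test.Claim.Spec.StorageClassName = &test.Class.Name
+//	test.TestDynamicProvisioning()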
type StorageClassTest struct { + Client clientset.Interface + Claim *v1.PersistentVolumeClaim + Class *storage.StorageClass Name string CloudProviders []string Provisioner string @@ -76,183 +79,156 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo { return p.tsInfo } -func (p *provisioningTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { -} +func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + var ( + dInfo = driver.GetDriverInfo() + dDriver DynamicPVTestDriver + config *PerTestConfig + testCleanup func() + testCase *StorageClassTest + cs clientset.Interface + pvc *v1.PersistentVolumeClaim + sc *storage.StorageClass + ) -func createProvisioningTestInput(driver TestDriver, pattern testpatterns.TestPattern) (provisioningTestResource, provisioningTestInput) { - // Setup test resource for driver and testpattern - resource := provisioningTestResource{} - resource.setupResource(driver, pattern) - - input := provisioningTestInput{ - testCase: StorageClassTest{ - ClaimSize: resource.claimSize, - ExpectedSize: resource.claimSize, - }, - cs: driver.GetDriverInfo().Config.Framework.ClientSet, - dc: driver.GetDriverInfo().Config.Framework.DynamicClient, - pvc: resource.pvc, - sc: resource.sc, - vsc: resource.vsc, - dInfo: driver.GetDriverInfo(), - nodeName: driver.GetDriverInfo().Config.ClientNodeName, - } - - return resource, input -} - -func (p *provisioningTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(p, pattern), func() { - var ( - resource provisioningTestResource - input provisioningTestInput - needsCleanup bool - ) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(p, driver, pattern) - needsCleanup = true - - // Create test input - resource, input = createProvisioningTestInput(driver, pattern) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - // Ginkgo's "Global Shared Behaviors" require arguments for a shared function - // to be a single struct and to be passed as a pointer. - // Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details. - testProvisioning(&input) - }) -} - -type provisioningTestResource struct { - driver TestDriver - - claimSize string - sc *storage.StorageClass - pvc *v1.PersistentVolumeClaim - // follow parameter is used to test provision volume from snapshot - vsc *unstructured.Unstructured -} - -var _ TestResource = &provisioningTestResource{} - -func (p *provisioningTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - // Setup provisioningTest resource - switch pattern.VolType { - case testpatterns.DynamicPV: - if dDriver, ok := driver.(DynamicPVTestDriver); ok { - p.sc = dDriver.GetDynamicProvisionStorageClass("") - if p.sc == nil { - framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name) - } - p.driver = driver - p.claimSize = dDriver.GetClaimSize() - p.pvc = getClaim(p.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name) - p.pvc.Spec.StorageClassName = &p.sc.Name - framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", p.sc, p.pvc) - if sDriver, ok := driver.(SnapshottableTestDriver); ok { - p.vsc = sDriver.GetSnapshotClass() - } + BeforeEach(func() { + // Check preconditions. 
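+		// Everything checked here comes from static driver information
+		// and the test pattern, so it is cheap to evaluate before any
+		// cluster resources have been set up.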
+ if pattern.VolType != testpatterns.DynamicPV { + framework.Skipf("Suite %q does not support %v", p.tsInfo.name, pattern.VolType) + } + ok := false + dDriver, ok = driver.(DynamicPVTestDriver) + if !ok { + framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } + }) + + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("provisioning") + + init := func() { + // Now do the more expensive test initialization. + config, testCleanup = driver.PrepareTest(f) + cs = config.Framework.ClientSet + claimSize := dDriver.GetClaimSize() + sc = dDriver.GetDynamicProvisionStorageClass(config, "") + if sc == nil { + framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) + } + pvc = getClaim(claimSize, config.Framework.Namespace.Name) + pvc.Spec.StorageClassName = &sc.Name + framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", sc, pvc) + testCase = &StorageClassTest{ + Client: config.Framework.ClientSet, + Claim: pvc, + Class: sc, + ClaimSize: claimSize, + ExpectedSize: claimSize, } - default: - framework.Failf("Dynamic Provision test doesn't support: %s", pattern.VolType) } -} -func (p *provisioningTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) { -} - -type provisioningTestInput struct { - testCase StorageClassTest - cs clientset.Interface - dc dynamic.Interface - pvc *v1.PersistentVolumeClaim - sc *storage.StorageClass - vsc *unstructured.Unstructured - dInfo *DriverInfo - nodeName string -} - -func testProvisioning(input *provisioningTestInput) { - // common checker for most of the test cases below - pvcheck := func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - PVWriteReadSingleNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName}) + cleanup := func() { + if testCleanup != nil { + testCleanup() + testCleanup = nil + } } It("should provision storage with defaults", func() { - input.testCase.PvCheck = pvcheck - TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + init() + defer cleanup() + + testCase.TestDynamicProvisioning() }) It("should provision storage with mount options", func() { - if input.dInfo.SupportedMountOption == nil { - framework.Skipf("Driver %q does not define supported mount option - skipping", input.dInfo.Name) + if dInfo.SupportedMountOption == nil { + framework.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name) } - input.sc.MountOptions = input.dInfo.SupportedMountOption.Union(input.dInfo.RequiredMountOption).List() - input.testCase.PvCheck = pvcheck - TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + init() + defer cleanup() + + testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List() + testCase.TestDynamicProvisioning() }) It("should access volume from different nodes", func() { + init() + defer cleanup() + // The assumption is that if the test hasn't been // locked onto a single node, then the driver is // usable on all of them *and* supports accessing a volume // from any node. 
- if input.nodeName != "" { - framework.Skipf("Driver %q only supports testing on one node - skipping", input.dInfo.Name) + if config.ClientNodeName != "" { + framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name) } + // Ensure that we actually have more than one node. - nodes := framework.GetReadySchedulableNodesOrDie(input.cs) + nodes := framework.GetReadySchedulableNodesOrDie(cs) if len(nodes.Items) <= 1 { framework.Skipf("need more than one node - skipping") } - input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - PVMultiNodeCheck(input.cs, claim, volume, NodeSelection{Name: input.nodeName}) + testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + PVMultiNodeCheck(cs, claim, volume, NodeSelection{Name: config.ClientNodeName}) } - TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + testCase.TestDynamicProvisioning() }) It("should create and delete block persistent volumes", func() { - if !input.dInfo.Capabilities[CapBlock] { - framework.Skipf("Driver %q does not support BlockVolume - skipping", input.dInfo.Name) + if !dInfo.Capabilities[CapBlock] { + framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name) } + + init() + defer cleanup() + block := v1.PersistentVolumeBlock - input.testCase.VolumeMode = &block - input.pvc.Spec.VolumeMode = &block - TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + testCase.VolumeMode = &block + pvc.Spec.VolumeMode = &block + testCase.TestDynamicProvisioning() }) It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { - if !input.dInfo.Capabilities[CapDataSource] { - framework.Skipf("Driver %q does not support populate data from snapshot - skipping", input.dInfo.Name) + if !dInfo.Capabilities[CapDataSource] { + framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name) } - dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: input.nodeName}, input.cs, input.dc, input.pvc, input.sc, input.vsc) + sDriver, ok := driver.(SnapshottableTestDriver) + if !ok { + framework.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name) + } + + init() + defer cleanup() + + dc := config.Framework.DynamicClient + vsc := sDriver.GetSnapshotClass(config) + dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: config.ClientNodeName}, cs, dc, pvc, sc, vsc) defer cleanupFunc() - input.pvc.Spec.DataSource = dataSource - input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + pvc.Spec.DataSource = dataSource + testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { By("checking whether the created volume has the pre-populated data") command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) - RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: input.nodeName}) + RunInPodWithVolume(cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: config.ClientNodeName}) } - TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + testCase.TestDynamicProvisioning() }) It("should allow concurrent writes on the same node", func() { - if !input.dInfo.Capabilities[CapMultiPODs] { - framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", 
input.dInfo.Name) + if !dInfo.Capabilities[CapMultiPODs] { + framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name) } - input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + + init() + defer cleanup() + + testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { // We start two pods concurrently on the same node, // using the same PVC. Both wait for other to create a // file before returning. The pods are forced onto the @@ -265,7 +241,7 @@ func testProvisioning(input *provisioningTestInput) { defer GinkgoRecover() defer wg.Done() node := NodeSelection{ - Name: input.nodeName, + Name: config.ClientNodeName, } if podName == secondPodName { node.Affinity = &v1.Affinity{ @@ -283,18 +259,24 @@ func testProvisioning(input *provisioningTestInput) { }, } } - RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node) + RunInPodWithVolume(cs, claim.Namespace, claim.Name, podName, command, node) } go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done") go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done") wg.Wait() } - TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc) + testCase.TestDynamicProvisioning() }) } -// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass -func TestDynamicProvisioning(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) *v1.PersistentVolume { +// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest +func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { + client := t.Client + Expect(client).NotTo(BeNil(), "StorageClassTest.Client is required") + claim := t.Claim + Expect(claim).NotTo(BeNil(), "StorageClassTest.Claim is required") + class := t.Class + var err error if class != nil { Expect(*claim.Spec.StorageClassName).To(Equal(class.Name)) @@ -493,29 +475,29 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai pod = nil } -func TestBindingWaitForFirstConsumer(t StorageClassTest, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) { - pvs, node := TestBindingWaitForFirstConsumerMultiPVC(t, client, []*v1.PersistentVolumeClaim{claim}, class, nodeSelector, expectUnschedulable) +func (t StorageClassTest) TestBindingWaitForFirstConsumer(nodeSelector map[string]string, expectUnschedulable bool) (*v1.PersistentVolume, *v1.Node) { + pvs, node := t.TestBindingWaitForFirstConsumerMultiPVC([]*v1.PersistentVolumeClaim{t.Claim}, nodeSelector, expectUnschedulable) if pvs == nil { return nil, node } return pvs[0], node } -func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { +func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { var err error Expect(len(claims)).ToNot(Equal(0)) namespace := claims[0].Namespace - By("creating a storage class " + class.Name) - class, err = 
client.StorageV1().StorageClasses().Create(class) + By("creating a storage class " + t.Class.Name) + class, err := t.Client.StorageV1().StorageClasses().Create(t.Class) Expect(err).NotTo(HaveOccurred()) - defer deleteStorageClass(client, class.Name) + defer deleteStorageClass(t.Client, class.Name) By("creating claims") var claimNames []string var createdClaims []*v1.PersistentVolumeClaim for _, claim := range claims { - c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) + c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) claimNames = append(claimNames, c.Name) createdClaims = append(createdClaims, c) Expect(err).NotTo(HaveOccurred()) @@ -523,7 +505,7 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse defer func() { var errors map[string]error for _, claim := range createdClaims { - err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace) + err := framework.DeletePersistentVolumeClaim(t.Client, claim.Name, claim.Namespace) if err != nil { errors[claim.Name] = err } @@ -537,44 +519,44 @@ func TestBindingWaitForFirstConsumerMultiPVC(t StorageClassTest, client clientse // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out By("checking the claims are in pending state") - err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) + err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) Expect(err).To(HaveOccurred()) - verifyPVCsPending(client, createdClaims) + verifyPVCsPending(t.Client, createdClaims) By("creating a pod referring to the claims") // Create a pod referring to the claim and wait for it to get to running var pod *v1.Pod if expectUnschedulable { - pod, err = framework.CreateUnschedulablePod(client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) + pod, err = framework.CreateUnschedulablePod(t.Client, namespace, nodeSelector, createdClaims, true /* isPrivileged */, "" /* command */) } else { - pod, err = framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) + pod, err = framework.CreatePod(t.Client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */) } Expect(err).NotTo(HaveOccurred()) defer func() { - framework.DeletePodOrFail(client, pod.Namespace, pod.Name) - framework.WaitForPodToDisappear(client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) + framework.DeletePodOrFail(t.Client, pod.Namespace, pod.Name) + framework.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) }() if expectUnschedulable { // Verify that no claims are provisioned. 
- verifyPVCsPending(client, createdClaims) + verifyPVCsPending(t.Client, createdClaims) return nil, nil } // collect node details - node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) + node, err := t.Client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) By("re-checking the claims to see they binded") var pvs []*v1.PersistentVolume for _, claim := range createdClaims { // Get new copy of the claim - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) + claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) // make sure claim did bind - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) Expect(err).NotTo(HaveOccurred()) - pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := t.Client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) Expect(err).NotTo(HaveOccurred()) pvs = append(pvs, pv) } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 98f4ca0c8e6..64507514500 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -24,13 +24,10 @@ import ( . "github.com/onsi/gomega" "k8s.io/api/core/v1" - storage "k8s.io/api/storage/v1" apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" - clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -48,13 +45,12 @@ var ( ) type SnapshotClassTest struct { - Name string - CloudProviders []string - Snapshotter string - Parameters map[string]string - NodeName string - NodeSelector map[string]string // NodeSelector for the pod - SnapshotContentCheck func(snapshotContent *unstructured.Unstructured) error + Name string + CloudProviders []string + Snapshotter string + Parameters map[string]string + NodeName string + NodeSelector map[string]string // NodeSelector for the pod } type snapshottableTestSuite struct { @@ -79,218 +75,129 @@ func (s *snapshottableTestSuite) getTestSuiteInfo() TestSuiteInfo { return s.tsInfo } -func (s *snapshottableTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { - dInfo := driver.GetDriverInfo() - if !dInfo.Capabilities[CapDataSource] { - framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name) - } -} +func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + var ( + sDriver SnapshottableTestDriver + dDriver DynamicPVTestDriver + ) -func createSnapshottableTestInput(driver TestDriver, pattern testpatterns.TestPattern) (snapshottableTestResource, snapshottableTestInput) { - // Setup test resource for driver and testpattern - resource := snapshottableTestResource{} - resource.setupResource(driver, pattern) - - dInfo := driver.GetDriverInfo() - input := snapshottableTestInput{ - testCase: SnapshotClassTest{ - NodeName: dInfo.Config.ClientNodeName, 
- }, - cs: dInfo.Config.Framework.ClientSet, - dc: dInfo.Config.Framework.DynamicClient, - pvc: resource.pvc, - sc: resource.sc, - vsc: resource.vsc, - dInfo: dInfo, - } - - return resource, input -} - -func (s *snapshottableTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(s, pattern), func() { - var ( - resource snapshottableTestResource - input snapshottableTestInput - needsCleanup bool - ) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(s, driver, pattern) - needsCleanup = true - - // Create test input - resource, input = createSnapshottableTestInput(driver, pattern) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - // Ginkgo's "Global Shared Behaviors" require arguments for a shared function - // to be a single struct and to be passed as a pointer. - // Please see https://onsi.github.io/ginkgo/#global-shared-behaviors for details. - testSnapshot(&input) + BeforeEach(func() { + // Check preconditions. + Expect(pattern.SnapshotType).To(Equal(testpatterns.DynamicCreatedSnapshot)) + dInfo := driver.GetDriverInfo() + ok := false + sDriver, ok = driver.(SnapshottableTestDriver) + if !dInfo.Capabilities[CapDataSource] || !ok { + framework.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name) + } + dDriver, ok = driver.(DynamicPVTestDriver) + if !ok { + framework.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name) + } }) -} -type snapshottableTestResource struct { - driver TestDriver - claimSize string + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("snapshotting") - sc *storage.StorageClass - pvc *v1.PersistentVolumeClaim - // volume snapshot class - vsc *unstructured.Unstructured -} + It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { + cs := f.ClientSet + dc := f.DynamicClient -var _ TestResource = &snapshottableTestResource{} + // Now do the more expensive test initialization. 
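+		// PrepareTest may deploy per-test driver resources; the returned
+		// cleanup callback is deferred immediately below so that it also
+		// runs when the test fails.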
+ config, testCleanup := driver.PrepareTest(f) + defer testCleanup() -func (s *snapshottableTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - // Setup snapshottableTest resource - switch pattern.SnapshotType { - case testpatterns.DynamicCreatedSnapshot: - if dDriver, ok := driver.(DynamicPVTestDriver); ok { - s.sc = dDriver.GetDynamicProvisionStorageClass("") - if s.sc == nil { - framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name) - } - s.driver = driver - s.claimSize = dDriver.GetClaimSize() - s.pvc = getClaim(s.claimSize, driver.GetDriverInfo().Config.Framework.Namespace.Name) - s.pvc.Spec.StorageClassName = &s.sc.Name - framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", s.sc, s.pvc) - - if sDriver, ok := driver.(SnapshottableTestDriver); ok { - s.vsc = sDriver.GetSnapshotClass() - } + vsc := sDriver.GetSnapshotClass(config) + class := dDriver.GetDynamicProvisionStorageClass(config, "") + if class == nil { + framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", driver.GetDriverInfo().Name) } - default: - framework.Failf("Dynamic Snapshot test doesn't support: %s", pattern.SnapshotType) - } -} + claimSize := dDriver.GetClaimSize() + pvc := getClaim(claimSize, config.Framework.Namespace.Name) + pvc.Spec.StorageClassName = &class.Name + framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", class, pvc) -func (s *snapshottableTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) { -} - -type snapshottableTestInput struct { - testCase SnapshotClassTest - cs clientset.Interface - dc dynamic.Interface - pvc *v1.PersistentVolumeClaim - sc *storage.StorageClass - // volume snapshot class - vsc *unstructured.Unstructured - dInfo *DriverInfo -} - -func testSnapshot(input *snapshottableTestInput) { - It("should create snapshot with defaults [Feature:VolumeSnapshotDataSource]", func() { - TestCreateSnapshot(input.testCase, input.cs, input.dc, input.pvc, input.sc, input.vsc) - }) -} - -// TestCreateSnapshot tests dynamic creating snapshot with specified SnapshotClassTest and snapshotClass -func TestCreateSnapshot( - t SnapshotClassTest, - client clientset.Interface, - dynamicClient dynamic.Interface, - claim *v1.PersistentVolumeClaim, - class *storage.StorageClass, - snapshotClass *unstructured.Unstructured, -) *unstructured.Unstructured { - var err error - if class != nil { By("creating a StorageClass " + class.Name) - class, err = client.StorageV1().StorageClasses().Create(class) + class, err := cs.StorageV1().StorageClasses().Create(class) Expect(err).NotTo(HaveOccurred()) defer func() { framework.Logf("deleting storage class %s", class.Name) - framework.ExpectNoError(client.StorageV1().StorageClasses().Delete(class.Name, nil)) + framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(class.Name, nil)) }() - } - By("creating a claim") - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim) - Expect(err).NotTo(HaveOccurred()) - defer func() { - framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) - // typically this claim has already been deleted - err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) - if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting claim %q. 
Error: %v", claim.Name, err) - } - }() - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).NotTo(HaveOccurred()) - - By("checking the claim") - // Get new copy of the claim - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - // Get the bound PV - pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("creating a SnapshotClass") - snapshotClass, err = dynamicClient.Resource(snapshotClassGVR).Create(snapshotClass, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - defer func() { - framework.Logf("deleting SnapshotClass %s", snapshotClass.GetName()) - framework.ExpectNoError(dynamicClient.Resource(snapshotClassGVR).Delete(snapshotClass.GetName(), nil)) - }() - - By("creating a snapshot") - snapshot := getSnapshot(claim.Name, claim.Namespace, snapshotClass.GetName()) - - snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{}) - Expect(err).NotTo(HaveOccurred()) - defer func() { - framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName()) - // typically this snapshot has already been deleted - err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil) - if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting snapshot %q. Error: %v", claim.Name, err) - } - }() - err = WaitForSnapshotReady(dynamicClient, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) - Expect(err).NotTo(HaveOccurred()) - - By("checking the snapshot") - // Get new copy of the snapshot - snapshot, err = dynamicClient.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - // Get the bound snapshotContent - snapshotSpec := snapshot.Object["spec"].(map[string]interface{}) - snapshotContentName := snapshotSpec["snapshotContentName"].(string) - snapshotContent, err := dynamicClient.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{}) - volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{}) - persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{}) - - // Check SnapshotContent properties - By("checking the SnapshotContent") - Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(snapshotClass.GetName())) - Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName())) - Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace())) - Expect(persistentVolumeRef["name"]).To(Equal(pv.Name)) - - // Run the checker - if t.SnapshotContentCheck != nil { - err = t.SnapshotContentCheck(snapshotContent) + By("creating a claim") + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(pvc) + Expect(err).NotTo(HaveOccurred()) + defer func() { + framework.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) + // typically this claim has already been deleted + err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) + if err != nil && !apierrs.IsNotFound(err) { + framework.Failf("Error deleting 
claim %q. Error: %v", pvc.Name, err) + } + }() + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) Expect(err).NotTo(HaveOccurred()) - } - return snapshotContent + By("checking the claim") + // Get new copy of the claim + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // Get the bound PV + pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("creating a SnapshotClass") + vsc, err = dc.Resource(snapshotClassGVR).Create(vsc, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer func() { + framework.Logf("deleting SnapshotClass %s", vsc.GetName()) + framework.ExpectNoError(dc.Resource(snapshotClassGVR).Delete(vsc.GetName(), nil)) + }() + + By("creating a snapshot") + snapshot := getSnapshot(pvc.Name, pvc.Namespace, vsc.GetName()) + + snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Create(snapshot, metav1.CreateOptions{}) + Expect(err).NotTo(HaveOccurred()) + defer func() { + framework.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName()) + // typically this snapshot has already been deleted + err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil) + if err != nil && !apierrs.IsNotFound(err) { + framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err) + } + }() + err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) + Expect(err).NotTo(HaveOccurred()) + + By("checking the snapshot") + // Get new copy of the snapshot + snapshot, err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Get(snapshot.GetName(), metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + // Get the bound snapshotContent + snapshotSpec := snapshot.Object["spec"].(map[string]interface{}) + snapshotContentName := snapshotSpec["snapshotContentName"].(string) + snapshotContent, err := dc.Resource(snapshotContentGVR).Get(snapshotContentName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + snapshotContentSpec := snapshotContent.Object["spec"].(map[string]interface{}) + volumeSnapshotRef := snapshotContentSpec["volumeSnapshotRef"].(map[string]interface{}) + persistentVolumeRef := snapshotContentSpec["persistentVolumeRef"].(map[string]interface{}) + + // Check SnapshotContent properties + By("checking the SnapshotContent") + Expect(snapshotContentSpec["snapshotClassName"]).To(Equal(vsc.GetName())) + Expect(volumeSnapshotRef["name"]).To(Equal(snapshot.GetName())) + Expect(volumeSnapshotRef["namespace"]).To(Equal(snapshot.GetNamespace())) + Expect(persistentVolumeRef["name"]).To(Equal(pv.Name)) + }) } // WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first. 
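Every converted suite now follows the same skeleton: precondition checks in a
BeforeEach, the framework created after those checks, and an explicit
init/cleanup pair called from each It. A rough sketch of that shape
(fooTestSuite and the test body are illustrative, not part of this patch):

	func (s *fooTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
		var (
			config      *PerTestConfig
			testCleanup func()
		)

		BeforeEach(func() {
			// Cheap checks of driver info and pattern only; nothing is
			// allocated yet, so skipping here is free.
		})

		f := framework.NewDefaultFramework("foo")

		init := func() {
			config, testCleanup = driver.PrepareTest(f)
		}
		cleanup := func() {
			if testCleanup != nil {
				testCleanup()
				testCleanup = nil
			}
		}

		It("should do something", func() {
			init()
			defer cleanup()
			_ = config // the test body works against config
		})
	}
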
diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 072182e838a..1d36a0efbaa 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/wait" + clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -71,345 +72,352 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo { return s.tsInfo } -func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { -} +func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + var ( + config *PerTestConfig + testCleanup func() + cs clientset.Interface + resource *genericVolumeTestResource + roVolSource *v1.VolumeSource + pod *v1.Pod + formatPod *v1.Pod + subPathDir string + filePathInSubpath string + filePathInVolume string + ) -func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput { - driver := resource.driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - subPath := f.Namespace.Name - subPathDir := filepath.Join(volumePath, subPath) + // No preconditions to test. Normally they would be in a BeforeEach here. - return subPathTestInput{ - f: f, - subPathDir: subPathDir, - filePathInSubpath: filepath.Join(volumePath, fileName), - filePathInVolume: filepath.Join(subPathDir, fileName), - volType: resource.volType, - pod: resource.pod, - formatPod: resource.formatPod, - volSource: resource.genericVolumeTestResource.volSource, - roVol: resource.roVolSource, - } -} + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("provisioning") -func (s *subPathTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(s, pattern), func() { - var ( - resource subPathTestResource - input subPathTestInput - needsCleanup bool - ) + init := func() { + cs = f.ClientSet // needed for cleanup, f.ClientSet itself gets reset too early - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(s, driver, pattern) - needsCleanup = true + // Now do the more expensive test initialization. 
+ config, testCleanup = driver.PrepareTest(f) + fsType := pattern.FsType + volType := pattern.VolType - // Setup test resource for driver and testpattern - resource = subPathTestResource{} - resource.setupResource(driver, pattern) + resource = createGenericVolumeTestResource(driver, config, pattern) - // Create test input - input = createSubPathTestInput(pattern, resource) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) + // Setup subPath test dependent resource + roVolSource = nil + switch volType { + case testpatterns.InlineVolume: + if iDriver, ok := driver.(InlineVolumeTestDriver); ok { + roVolSource = iDriver.GetVolumeSource(true, fsType, resource.volume) } - }) - - testSubPath(&input) - }) -} - -type subPathTestResource struct { - genericVolumeTestResource - - roVolSource *v1.VolumeSource - pod *v1.Pod - formatPod *v1.Pod -} - -var _ TestResource = &subPathTestResource{} - -func (s *subPathTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - s.driver = driver - dInfo := s.driver.GetDriverInfo() - f := dInfo.Config.Framework - fsType := pattern.FsType - volType := pattern.VolType - - // Setup generic test resource - s.genericVolumeTestResource.setupResource(driver, pattern) - - // Setup subPath test dependent resource - switch volType { - case testpatterns.InlineVolume: - if iDriver, ok := driver.(InlineVolumeTestDriver); ok { - s.roVolSource = iDriver.GetVolumeSource(true, fsType, s.genericVolumeTestResource.volume) + case testpatterns.PreprovisionedPV: + roVolSource = &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: resource.pvc.Name, + ReadOnly: true, + }, + } + case testpatterns.DynamicPV: + roVolSource = &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: resource.pvc.Name, + ReadOnly: true, + }, + } + default: + framework.Failf("SubPath test doesn't support: %s", volType) } - case testpatterns.PreprovisionedPV: - s.roVolSource = &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.genericVolumeTestResource.pvc.Name, - ReadOnly: true, - }, - } - case testpatterns.DynamicPV: - s.roVolSource = &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.genericVolumeTestResource.pvc.Name, - ReadOnly: true, - }, - } - default: - framework.Failf("SubPath test doesn't support: %s", volType) + + subPath := f.Namespace.Name + pod = SubpathTestPod(f, subPath, resource.volType, resource.volSource, true) + pod.Spec.NodeName = config.ClientNodeName + pod.Spec.NodeSelector = config.ClientNodeSelector + + formatPod = volumeFormatPod(f, resource.volSource) + formatPod.Spec.NodeName = config.ClientNodeName + formatPod.Spec.NodeSelector = config.ClientNodeSelector + + subPathDir = filepath.Join(volumePath, subPath) + filePathInSubpath = filepath.Join(volumePath, fileName) + filePathInVolume = filepath.Join(subPathDir, fileName) } - subPath := f.Namespace.Name - config := dInfo.Config - s.pod = SubpathTestPod(f, subPath, s.volType, s.volSource, true) - s.pod.Spec.NodeName = config.ClientNodeName - s.pod.Spec.NodeSelector = config.ClientNodeSelector + cleanup := func() { + if pod != nil { + By("Deleting pod") + err := framework.DeletePodWithWait(f, cs, pod) + Expect(err).ToNot(HaveOccurred(), "while deleting pod") + pod = nil + } - s.formatPod = volumeFormatPod(f, s.volSource) - s.formatPod.Spec.NodeName = config.ClientNodeName - 
s.formatPod.Spec.NodeSelector = config.ClientNodeSelector -} + if resource != nil { + resource.cleanupResource() + resource = nil + } -func (s *subPathTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) { - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework + if testCleanup != nil { + testCleanup() + testCleanup = nil + } + } - // Cleanup subPath test dependent resource - By("Deleting pod") - err := framework.DeletePodWithWait(f, f.ClientSet, s.pod) - Expect(err).ToNot(HaveOccurred(), "while deleting pod") - - // Cleanup generic test resource - s.genericVolumeTestResource.cleanupResource(driver, pattern) -} - -type subPathTestInput struct { - f *framework.Framework - subPathDir string - filePathInSubpath string - filePathInVolume string - volType string - pod *v1.Pod - formatPod *v1.Pod - volSource *v1.VolumeSource - roVol *v1.VolumeSource -} - -func testSubPath(input *subPathTestInput) { It("should support non-existent path", func() { + init() + defer cleanup() + // Write the file in the subPath from init container 1 - setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1]) + setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1]) // Read it from outside the subPath from container 1 - testReadFile(input.f, input.filePathInVolume, input.pod, 1) + testReadFile(f, filePathInVolume, pod, 1) }) It("should support existing directory", func() { + init() + defer cleanup() + // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir)) // Write the file in the subPath from init container 1 - setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1]) + setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1]) // Read it from outside the subPath from container 1 - testReadFile(input.f, input.filePathInVolume, input.pod, 1) + testReadFile(f, filePathInVolume, pod, 1) }) It("should support existing single file", func() { + init() + defer cleanup() + // Create the file in the init container - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume)) + setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume)) // Read it from inside the subPath from container 0 - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + testReadFile(f, filePathInSubpath, pod, 0) }) It("should support file as subpath", func() { - // Create the file in the init container - setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir)) + init() + defer cleanup() - TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod) + // Create the file in the init container + setInitCommand(pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, subPathDir)) + + TestBasicSubpath(f, f.Namespace.Name, pod) }) It("should fail if subpath directory is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir)) // Pod should fail - testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, pod, false) }) It("should fail if subpath file is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s 
/bin/sh %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir)) // Pod should fail - testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, pod, false) }) It("should fail if non-existent subpath is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir)) // Pod should fail - testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, pod, false) }) It("should fail if subpath with backstepping is outside the volume [Slow]", func() { + init() + defer cleanup() + // Create the subpath outside the volume - setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", subPathDir)) // Pod should fail - testPodFailSubpath(input.f, input.pod, false) + testPodFailSubpath(f, pod, false) }) It("should support creating multiple subpath from same volumes [Slow]", func() { + init() + defer cleanup() + subpathDir1 := filepath.Join(volumePath, "subpath1") subpathDir2 := filepath.Join(volumePath, "subpath2") filepath1 := filepath.Join("/test-subpath1", fileName) filepath2 := filepath.Join("/test-subpath2", fileName) - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2)) + setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2)) - addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{ + addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{ Name: volumeName, MountPath: "/test-subpath1", SubPath: "subpath1", }) - addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{ + addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{ Name: volumeName, MountPath: "/test-subpath2", SubPath: "subpath2", }) // Write the files from container 0 and instantly read them back - addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2) - testMultipleReads(input.f, input.pod, 0, filepath1, filepath2) + addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2) + testMultipleReads(f, pod, 0, filepath1, filepath2) }) It("should support restarting containers using directory as subpath [Slow]", func() { - // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath)) + init() + defer cleanup() - testPodContainerRestart(input.f, input.pod) + // Create the directory + setInitCommand(pod, fmt.Sprintf("mkdir -p %v; touch %v", subPathDir, probeFilePath)) + + testPodContainerRestart(f, pod) }) It("should support restarting containers using file as subpath [Slow]", func() { - // Create the file - setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath)) + init() + defer cleanup() - testPodContainerRestart(input.f, input.pod) + // Create the file + setInitCommand(pod, fmt.Sprintf("touch %v; touch %v", subPathDir, probeFilePath)) + + testPodContainerRestart(f, pod) }) It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { - testSubpathReconstruction(input.f, input.pod, false) + init() + defer cleanup() + + testSubpathReconstruction(f, pod, false) }) It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { - if strings.HasPrefix(input.volType, "hostPath") || 
strings.HasPrefix(input.volType, "csi-hostpath") { + init() + defer cleanup() + + if strings.HasPrefix(resource.volType, "hostPath") || strings.HasPrefix(resource.volType, "csi-hostpath") { // TODO: This skip should be removed once #61446 is fixed - framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType) + framework.Skipf("%s volume type does not support reconstruction, skipping", resource.volType) } - testSubpathReconstruction(input.f, input.pod, true) + + testSubpathReconstruction(f, pod, true) }) It("should support readOnly directory specified in the volumeMount", func() { + init() + defer cleanup() + // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir)) // Write the file in the volume from init container 2 - setWriteCommand(input.filePathInVolume, &input.pod.Spec.InitContainers[2]) + setWriteCommand(filePathInVolume, &pod.Spec.InitContainers[2]) // Read it from inside the subPath from container 0 - input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true + testReadFile(f, filePathInSubpath, pod, 0) }) It("should support readOnly file specified in the volumeMount", func() { + init() + defer cleanup() + // Create the file - setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("touch %s", subPathDir)) // Write the file in the volume from init container 2 - setWriteCommand(input.subPathDir, &input.pod.Spec.InitContainers[2]) + setWriteCommand(subPathDir, &pod.Spec.InitContainers[2]) // Read it from inside the subPath from container 0 - input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(input.f, volumePath, input.pod, 0) + pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true + testReadFile(f, volumePath, pod, 0) }) It("should support existing directories when readOnly specified in the volumeSource", func() { - if input.roVol == nil { - framework.Skipf("Volume type %v doesn't support readOnly source", input.volType) + init() + defer cleanup() + if roVolSource == nil { + framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType) } - pod := input.pod.DeepCopy() + origpod := pod.DeepCopy() // Create the directory - setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir)) // Write the file in the subPath from init container 1 - setWriteCommand(input.filePathInSubpath, &input.pod.Spec.InitContainers[1]) + setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1]) // Read it from inside the subPath from container 0 - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + testReadFile(f, filePathInSubpath, pod, 0) // Reset the pod - input.pod = pod + pod = origpod // Set volume source to read only - input.pod.Spec.Volumes[0].VolumeSource = *input.roVol + pod.Spec.Volumes[0].VolumeSource = *roVolSource // Read it from inside the subPath from container 0 - testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + testReadFile(f, filePathInSubpath, pod, 0) }) It("should verify container cannot write to subpath readonly volumes", func() { - if input.roVol == nil { - framework.Skipf("Volume type %v doesn't support readOnly source", input.volType) + init() + defer cleanup() + if roVolSource == nil { + framework.Skipf("Volume type %v doesn't support 
readOnly source", resource.volType) } // Format the volume while it's writable - formatVolume(input.f, input.formatPod) + formatVolume(f, formatPod) // Set volume source to read only - input.pod.Spec.Volumes[0].VolumeSource = *input.roVol + pod.Spec.Volumes[0].VolumeSource = *roVolSource // Write the file in the volume from container 0 - setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[0]) + setWriteCommand(subPathDir, &pod.Spec.Containers[0]) // Pod should fail - testPodFailSubpath(input.f, input.pod, true) + testPodFailSubpath(f, pod, true) }) It("should be able to unmount after the subpath directory is deleted", func() { - // Change volume container to busybox so we can exec later - input.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) - input.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + init() + defer cleanup() - By(fmt.Sprintf("Creating pod %s", input.pod.Name)) - removeUnusedContainers(input.pod) - pod, err := input.f.ClientSet.CoreV1().Pods(input.f.Namespace.Name).Create(input.pod) + // Change volume container to busybox so we can exec later + pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) + pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + + By(fmt.Sprintf("Creating pod %s", pod.Name)) + removeUnusedContainers(pod) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) Expect(err).ToNot(HaveOccurred(), "while creating pod") defer func() { By(fmt.Sprintf("Deleting pod %s", pod.Name)) - framework.DeletePodWithWait(input.f, input.f.ClientSet, pod) + framework.DeletePodWithWait(f, f.ClientSet, pod) }() // Wait for pod to be running - err = framework.WaitForPodRunningInNamespace(input.f.ClientSet, pod) + err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") // Exec into container that mounted the volume, delete subpath directory - rmCmd := fmt.Sprintf("rm -rf %s", input.subPathDir) + rmCmd := fmt.Sprintf("rm -rf %s", subPathDir) _, err = podContainerExec(pod, 1, rmCmd) Expect(err).ToNot(HaveOccurred(), "while removing subpath directory") diff --git a/test/e2e/storage/testsuites/testdriver.go b/test/e2e/storage/testsuites/testdriver.go index e1880af4841..d5b3bf58ba0 100644 --- a/test/e2e/storage/testsuites/testdriver.go +++ b/test/e2e/storage/testsuites/testdriver.go @@ -25,17 +25,29 @@ import ( "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) -// TestDriver represents an interface for a driver to be tested in TestSuite +// TestDriver represents an interface for a driver to be tested in TestSuite. +// Except for GetDriverInfo, all methods will be called at test runtime and thus +// can use framework.Skipf, framework.Fatal, Gomega assertions, etc. type TestDriver interface { - // GetDriverInfo returns DriverInfo for the TestDriver + // GetDriverInfo returns DriverInfo for the TestDriver. This must be static + // information. GetDriverInfo() *DriverInfo - // CreateDriver creates all driver resources that is required for TestDriver method - // except CreateVolume - CreateDriver() - // CreateDriver cleanup all the resources that is created in CreateDriver - CleanupDriver() - // SkipUnsupportedTest skips test in Testpattern is not suitable to test with the TestDriver + + // SkipUnsupportedTest skips test if Testpattern is not + // suitable to test with the TestDriver. 
It gets called after + // parsing parameters of the test suite and before the + // framework is initialized. Cheap tests that just check + // parameters like the cloud provider can and should be + // done in SkipUnsupportedTest to avoid setting up more + // expensive resources like framework.Framework. Tests that + // depend on a connection to the cluster can be done in + // PrepareTest once the framework is ready. SkipUnsupportedTest(testpatterns.TestPattern) + + // PrepareTest is called at test execution time each time a new test case is about to start. + // It sets up all necessary resources and returns the per-test configuration + // plus a cleanup function that frees all allocated resources. + PrepareTest(f *framework.Framework) (*PerTestConfig, func()) } // TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume. @@ -49,7 +61,7 @@ type TestVolume interface { type PreprovisionedVolumeTestDriver interface { TestDriver // CreateVolume creates a pre-provisioned volume of the desired volume type. - CreateVolume(volumeType testpatterns.TestVolType) TestVolume + CreateVolume(config *PerTestConfig, volumeType testpatterns.TestVolType) TestVolume } // InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume @@ -68,7 +80,6 @@ type PreprovisionedPVTestDriver interface { // GetPersistentVolumeSource returns a PersistentVolumeSource with volume node affinity for pre-provisioned Persistent Volume. // It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them. // It will return nil, if the TestDriver doesn't support either of the parameters. - // Volume node affinity is optional, it will be nil for volumes which does not have volume node affinity. GetPersistentVolumeSource(readOnly bool, fsType string, testVolume TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) } @@ -78,7 +89,7 @@ type DynamicPVTestDriver interface { // GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume. // It will set fsType to the StorageClass, if TestDriver supports it. // It will return nil, if the TestDriver doesn't support it. - GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass + GetDynamicProvisionStorageClass(config *PerTestConfig, fsType string) *storagev1.StorageClass // GetClaimSize returns the size of the volume that is to be provisioned ("5Gi", "1Mi"). // The size must be chosen so that the resulting volume is large enough for all @@ -91,7 +102,7 @@ type SnapshottableTestDriver interface { TestDriver // GetSnapshotClass returns a SnapshotClass to create snapshot. // It will return nil, if the TestDriver doesn't support it. - GetSnapshotClass() *unstructured.Unstructured + GetSnapshotClass(config *PerTestConfig) *unstructured.Unstructured } // Capability represents a feature that a volume plugin supports @@ -112,7 +123,7 @@ const ( CapMultiPODs Capability = "multipods" ) -// DriverInfo represents a combination of parameters to be used in implementation of TestDriver +// DriverInfo represents static information about a TestDriver. 
type DriverInfo struct {
 	Name       string // Name of the driver
 	FeatureTag string // FeatureTag for the driver
@@ -122,14 +133,15 @@ type DriverInfo struct {
 	SupportedMountOption sets.String         // Map of string for supported mount option
 	RequiredMountOption  sets.String         // Map of string for required mount option (Optional)
 	Capabilities         map[Capability]bool // Map that represents plugin capabilities
-
-	Config TestConfig // Test configuration for the current test.
 }
 
-// TestConfig represents parameters that control test execution.
-// They can still be modified after defining tests, for example
-// in a BeforeEach or when creating the driver.
-type TestConfig struct {
+// PerTestConfig represents parameters that control test execution.
+// One instance gets allocated for each test and is then passed
+// via pointer to functions involved in the test.
+type PerTestConfig struct {
+	// The test driver for the test.
+	Driver TestDriver
+
 	// Some short word that gets inserted into dynamically
 	// generated entities (pods, paths) as first part of the name
 	// to make debugging easier. Can be the same for different
@@ -154,8 +166,9 @@
 	// the configuration that then has to be used to run tests.
 	// The values above are ignored for such tests.
 	ServerConfig *framework.VolumeTestConfig
-
-	// TopologyEnabled indicates that the Topology feature gate
-	// should be enabled in external-provisioner
-	TopologyEnabled bool
+}
+
+// GetUniqueDriverName returns a unique driver name that can be used in tests running in parallel.
+func (config *PerTestConfig) GetUniqueDriverName() string {
+	return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName
 }
diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go
index ad32f84fa5f..ddcda3eda61 100644
--- a/test/e2e/storage/testsuites/volume_io.go
+++ b/test/e2e/storage/testsuites/volume_io.go
@@ -74,87 +74,59 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
 	return t.tsInfo
 }
 
-func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) {
-}
+func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
+	var (
+		dInfo       = driver.GetDriverInfo()
+		config      *PerTestConfig
+		testCleanup func()
+		resource    *genericVolumeTestResource
+	)
 
-func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput {
-	var fsGroup *int64
-	driver := resource.driver
-	dInfo := driver.GetDriverInfo()
-	f := dInfo.Config.Framework
-	fileSizes := createFileSizes(dInfo.MaxFileSize)
-	volSource := resource.volSource
+	// No preconditions to test. Normally they would be in a BeforeEach here.
 
-	if volSource == nil {
-		framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
+	// This intentionally comes after checking the preconditions because it
+	// registers its own BeforeEach which creates the namespace. Beware that it
+	// also registers an AfterEach which renders f unusable. Any code using
+	// f must run inside an It or Context callback.
+	f := framework.NewDefaultFramework("volumeio")
+
+	init := func() {
+		// Now do the more expensive test initialization.
+ config, testCleanup = driver.PrepareTest(f) + resource = createGenericVolumeTestResource(driver, config, pattern) + if resource.volSource == nil { + framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) + } } - if dInfo.Capabilities[CapFsGroup] { - fsGroupVal := int64(1234) - fsGroup = &fsGroupVal + cleanup := func() { + if resource != nil { + resource.cleanupResource() + resource = nil + } + + if testCleanup != nil { + testCleanup() + testCleanup = nil + } } - return volumeIOTestInput{ - f: f, - name: dInfo.Name, - config: &dInfo.Config, - volSource: *volSource, - testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name), - podSec: v1.PodSecurityContext{ - FSGroup: fsGroup, - }, - fileSizes: fileSizes, - } -} - -func (t *volumeIOTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(t, pattern), func() { - var ( - resource genericVolumeTestResource - input volumeIOTestInput - needsCleanup bool - ) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(t, driver, pattern) - needsCleanup = true - - // Setup test resource for driver and testpattern - resource = genericVolumeTestResource{} - resource.setupResource(driver, pattern) - - // Create test input - input = createVolumeIOTestInput(pattern, resource) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - execTestVolumeIO(&input) - }) -} - -type volumeIOTestInput struct { - f *framework.Framework - name string - config *TestConfig - volSource v1.VolumeSource - testFile string - podSec v1.PodSecurityContext - fileSizes []int64 -} - -func execTestVolumeIO(input *volumeIOTestInput) { It("should write files of various sizes, verify size, validate content [Slow]", func() { - f := input.f - cs := f.ClientSet + init() + defer cleanup() - err := testVolumeIO(f, cs, convertTestConfig(input.config), input.volSource, &input.podSec, input.testFile, input.fileSizes) + cs := f.ClientSet + fileSizes := createFileSizes(dInfo.MaxFileSize) + testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name) + var fsGroup *int64 + if dInfo.Capabilities[CapFsGroup] { + fsGroupVal := int64(1234) + fsGroup = &fsGroupVal + } + podSec := v1.PodSecurityContext{ + FSGroup: fsGroup, + } + err := testVolumeIO(f, cs, convertTestConfig(config), *resource.volSource, &podSec, testFile, fileSizes) Expect(err).NotTo(HaveOccurred()) }) } diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 7ac60385eef..45631703621 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -61,317 +61,252 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo { return t.tsInfo } -func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver TestDriver) { -} - -func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput { - driver := resource.driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - - return volumeModeTestInput{ - f: f, - sc: resource.sc, - pvc: resource.pvc, - pv: resource.pv, - testVolType: pattern.VolType, - nodeName: dInfo.Config.ClientNodeName, - volMode: pattern.VolMode, - isBlockSupported: dInfo.Capabilities[CapBlock], - } -} - -func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver TestDriver) func(*volumeModeTestInput) { 
- dInfo := driver.GetDriverInfo() - isBlockSupported := dInfo.Capabilities[CapBlock] - volMode := pattern.VolMode - volType := pattern.VolType - - switch volType { - case testpatterns.PreprovisionedPV: - if volMode == v1.PersistentVolumeBlock && !isBlockSupported { - return testVolumeModeFailForPreprovisionedPV - } - return testVolumeModeSuccessForPreprovisionedPV - case testpatterns.DynamicPV: - if volMode == v1.PersistentVolumeBlock && !isBlockSupported { - return testVolumeModeFailForDynamicPV - } - return testVolumeModeSuccessForDynamicPV - default: - framework.Failf("Volume mode test doesn't support volType: %v", volType) - } - return nil -} - -func (t *volumeModeTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(t, pattern), func() { - var ( - resource volumeModeTestResource - input volumeModeTestInput - testFunc func(*volumeModeTestInput) - needsCleanup bool - ) - - testFunc = getVolumeModeTestFunc(pattern, driver) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(t, driver, pattern) - needsCleanup = true - - // Setup test resource for driver and testpattern - resource = volumeModeTestResource{} - resource.setupResource(driver, pattern) - - // Create test input - input = createVolumeModeTestInput(pattern, resource) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - testFunc(&input) - }) -} - -type volumeModeTestResource struct { - driver TestDriver - - sc *storagev1.StorageClass - pvc *v1.PersistentVolumeClaim - pv *v1.PersistentVolume - - volume TestVolume -} - -var _ TestResource = &volumeModeTestResource{} - -func (s *volumeModeTestResource) setupResource(driver TestDriver, pattern testpatterns.TestPattern) { - s.driver = driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - ns := f.Namespace - fsType := pattern.FsType - volBindMode := storagev1.VolumeBindingImmediate - volMode := pattern.VolMode - volType := pattern.VolType - +func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { var ( - scName string - pvSource *v1.PersistentVolumeSource - volumeNodeAffinity *v1.VolumeNodeAffinity + dInfo = driver.GetDriverInfo() + config *PerTestConfig + testCleanup func() + sc *storagev1.StorageClass + pvc *v1.PersistentVolumeClaim + pv *v1.PersistentVolume + volume TestVolume ) - // Create volume for pre-provisioned volume tests - s.volume = CreateVolume(driver, volType) + // No preconditions to test. Normally they would be in a BeforeEach here. - switch volType { - case testpatterns.PreprovisionedPV: - if volMode == v1.PersistentVolumeBlock { - scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name) - } else if volMode == v1.PersistentVolumeFilesystem { - scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name) - } - if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok { - pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, s.volume) - if pvSource == nil { - framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name) + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. 
+	f := framework.NewDefaultFramework("volumemode")
+
+	init := func() {
+		// Now do the more expensive test initialization.
+		config, testCleanup = driver.PrepareTest(f)
+
+		ns := f.Namespace
+		fsType := pattern.FsType
+		volBindMode := storagev1.VolumeBindingImmediate
+
+		var (
+			scName             string
+			pvSource           *v1.PersistentVolumeSource
+			volumeNodeAffinity *v1.VolumeNodeAffinity
+		)
+
+		// Create volume for pre-provisioned volume tests
+		volume = CreateVolume(driver, config, pattern.VolType)
+
+		switch pattern.VolType {
+		case testpatterns.PreprovisionedPV:
+			if pattern.VolMode == v1.PersistentVolumeBlock {
+				scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
+			} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
+				scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
 			}
 			if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
 				pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, volume)
 				if pvSource == nil {
 					framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
 				}
 
-			sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource, volumeNodeAffinity)
-			s.sc = sc
-			s.pv = framework.MakePersistentVolume(pvConfig)
-			s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
+				storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
+				sc = storageClass
+				pv = framework.MakePersistentVolume(pvConfig)
+				pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
+			}
+		case testpatterns.DynamicPV:
+			if dDriver, ok := driver.(DynamicPVTestDriver); ok {
+				sc = dDriver.GetDynamicProvisionStorageClass(config, fsType)
+				if sc == nil {
+					framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
+				}
+				sc.VolumeBindingMode = &volBindMode
+
+				claimSize := dDriver.GetClaimSize()
+				pvc = getClaim(claimSize, ns.Name)
+				pvc.Spec.StorageClassName = &sc.Name
+				pvc.Spec.VolumeMode = &pattern.VolMode
+			}
+		default:
+			framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
+		}
+	}
+
+	cleanup := func() {
+		if pv != nil || pvc != nil {
+			By("Deleting pv and pvc")
+			errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, pv, pvc)
+			if len(errs) > 0 {
+				framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
+			}
+			pv = nil
+			pvc = nil
+		}
+
+		if sc != nil {
+			By("Deleting sc")
+			deleteStorageClass(f.ClientSet, sc.Name)
+			sc = nil
+		}
+
+		if volume != nil {
+			volume.DeleteVolume()
+			volume = nil
+		}
+
+		if testCleanup != nil {
+			testCleanup()
+			testCleanup = nil
+		}
+	}
+
+	// We register different tests depending on the driver's capabilities
+	isBlockSupported := dInfo.Capabilities[CapBlock]
+	switch pattern.VolType {
+	case testpatterns.PreprovisionedPV:
+		if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
+			It("should fail to create pod by failing to mount volume", func() {
+				init()
+				defer cleanup()
+
+				cs := f.ClientSet
+				ns := f.Namespace
+				var err error
+
+				By("Creating sc")
+				sc, err = cs.StorageV1().StorageClasses().Create(sc)
+				Expect(err).NotTo(HaveOccurred())
+
+				By("Creating pv and pvc")
+				pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
+				Expect(err).NotTo(HaveOccurred())
+
+				// Prebind pv
+				pvc.Spec.VolumeName = pv.Name
+				pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+				Expect(err).NotTo(HaveOccurred())
+
+
framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc)) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, config.ClientNodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) + }() + Expect(err).To(HaveOccurred()) + }) + } else { + It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + init() + defer cleanup() + + cs := f.ClientSet + ns := f.Namespace + var err error + + By("Creating sc") + sc, err = cs.StorageV1().StorageClasses().Create(sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + pv, err = cs.CoreV1().PersistentVolumes().Create(pv) + Expect(err).NotTo(HaveOccurred()) + + // Prebind pv + pvc.Spec.VolumeName = pv.Name + pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + Expect(err).NotTo(HaveOccurred()) + + framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc)) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, config.ClientNodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) + }() + Expect(err).NotTo(HaveOccurred()) + + By("Checking if persistent volume exists as expected volume mode") + utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") + + By("Checking if read/write to persistent volume works properly") + utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") + }) + // TODO(mkimuram): Add more tests } case testpatterns.DynamicPV: - if dDriver, ok := driver.(DynamicPVTestDriver); ok { - s.sc = dDriver.GetDynamicProvisionStorageClass(fsType) - if s.sc == nil { - framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) - } - s.sc.VolumeBindingMode = &volBindMode + if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported { + It("should fail in binding dynamic provisioned PV to PVC", func() { + init() + defer cleanup() - claimSize := dDriver.GetClaimSize() - s.pvc = getClaim(claimSize, ns.Name) - s.pvc.Spec.StorageClassName = &s.sc.Name - s.pvc.Spec.VolumeMode = &volMode + cs := f.ClientSet + ns := f.Namespace + var err error + + By("Creating sc") + sc, err = cs.StorageV1().StorageClasses().Create(sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).To(HaveOccurred()) + }) + } else { + It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + init() + defer cleanup() + + cs := f.ClientSet + ns := f.Namespace + var err error + + By("Creating sc") + sc, err = cs.StorageV1().StorageClasses().Create(sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred()) + + pvc, err = 
cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, config.ClientNodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) + }() + Expect(err).NotTo(HaveOccurred()) + + By("Checking if persistent volume exists as expected volume mode") + utils.CheckVolumeModeOfPath(pod, pattern.VolMode, "/mnt/volume1") + + By("Checking if read/write to persistent volume works properly") + utils.CheckReadWriteToPath(pod, pattern.VolMode, "/mnt/volume1") + }) + // TODO(mkimuram): Add more tests } default: - framework.Failf("Volume mode test doesn't support: %s", volType) - } -} - -func (s *volumeModeTestResource) cleanupResource(driver TestDriver, pattern testpatterns.TestPattern) { - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - cs := f.ClientSet - ns := f.Namespace - - By("Deleting pv and pvc") - errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc) - if len(errs) > 0 { - framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) - } - By("Deleting sc") - if s.sc != nil { - deleteStorageClass(cs, s.sc.Name) + framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType) } - // Cleanup volume for pre-provisioned volume tests - if s.volume != nil { - s.volume.DeleteVolume() - } -} - -type volumeModeTestInput struct { - f *framework.Framework - sc *storagev1.StorageClass - pvc *v1.PersistentVolumeClaim - pv *v1.PersistentVolume - testVolType testpatterns.TestVolType - nodeName string - volMode v1.PersistentVolumeMode - isBlockSupported bool -} - -func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) { - It("should fail to create pod by failing to mount volume", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv) - Expect(err).NotTo(HaveOccurred()) - - // Prebind pv - input.pvc.Spec.VolumeName = input.pv.Name - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - Expect(err).NotTo(HaveOccurred()) - - framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc)) - - By("Creating pod") - pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, - false, "", false, false, framework.SELinuxLabel, - nil, input.nodeName, framework.PodStartTimeout) - defer func() { - framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) - }() - Expect(err).To(HaveOccurred()) - }) -} - -func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) { - It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv) - 
Expect(err).NotTo(HaveOccurred()) - - // Prebind pv - input.pvc.Spec.VolumeName = input.pv.Name - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - Expect(err).NotTo(HaveOccurred()) - - framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc)) - - By("Creating pod") - pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, - false, "", false, false, framework.SELinuxLabel, - nil, input.nodeName, framework.PodStartTimeout) - defer func() { - framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) - }() - Expect(err).NotTo(HaveOccurred()) - - By("Checking if persistent volume exists as expected volume mode") - utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") - - By("Checking if read/write to persistent volume works properly") - utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1") - }) - // TODO(mkimuram): Add more tests -} - -func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) { - It("should fail in binding dynamic provisioned PV to PVC", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - Expect(err).NotTo(HaveOccurred()) - - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).To(HaveOccurred()) - }) -} - -func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) { - It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { - f := input.f - cs := f.ClientSet - ns := f.Namespace - var err error - - By("Creating sc") - input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) - Expect(err).NotTo(HaveOccurred()) - - err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) - Expect(err).NotTo(HaveOccurred()) - - input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pod") - pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, - false, "", false, false, framework.SELinuxLabel, - nil, input.nodeName, framework.PodStartTimeout) - defer func() { - framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) - }() - Expect(err).NotTo(HaveOccurred()) - - By("Checking if persistent volume exists as expected volume mode") - utils.CheckVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") - - By("Checking if read/write to persistent volume works properly") - utils.CheckReadWriteToPath(pod, input.volMode, "/mnt/volume1") - }) - // TODO(mkimuram): Add more tests } func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode, diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index 57222178eb0..d736f2e0c83 
100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -89,101 +89,76 @@ func skipExecTest(driver TestDriver) { } } -func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput { - var fsGroup *int64 - driver := resource.driver - dInfo := driver.GetDriverInfo() - f := dInfo.Config.Framework - volSource := resource.volSource +func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { + var ( + dInfo = driver.GetDriverInfo() + config *PerTestConfig + testCleanup func() + resource *genericVolumeTestResource + ) - if volSource == nil { - framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) + // No preconditions to test. Normally they would be in a BeforeEach here. + + // This intentionally comes after checking the preconditions because it + // registers its own BeforeEach which creates the namespace. Beware that it + // also registers an AfterEach which renders f unusable. Any code using + // f must run inside an It or Context callback. + f := framework.NewDefaultFramework("volumeio") + + init := func() { + // Now do the more expensive test initialization. + config, testCleanup = driver.PrepareTest(f) + resource = createGenericVolumeTestResource(driver, config, pattern) + if resource.volSource == nil { + framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) + } } - if dInfo.Capabilities[CapFsGroup] { - fsGroupVal := int64(1234) - fsGroup = &fsGroupVal + cleanup := func() { + if resource != nil { + resource.cleanupResource() + resource = nil + } + + if testCleanup != nil { + testCleanup() + testCleanup = nil + } } - return volumesTestInput{ - f: f, - name: dInfo.Name, - config: &dInfo.Config, - fsGroup: fsGroup, - resource: resource, - fsType: pattern.FsType, - tests: []framework.VolumeTest{ + It("should be mountable", func() { + skipPersistenceTest(driver) + init() + defer func() { + framework.VolumeTestCleanup(f, convertTestConfig(config)) + cleanup() + }() + + tests := []framework.VolumeTest{ { - Volume: *volSource, + Volume: *resource.volSource, File: "index.html", // Must match content ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s", dInfo.Name, f.Namespace.Name), }, - }, - } -} - -func (t *volumesTestSuite) execTest(driver TestDriver, pattern testpatterns.TestPattern) { - Context(getTestNameStr(t, pattern), func() { - var ( - resource genericVolumeTestResource - input volumesTestInput - needsCleanup bool - ) - - BeforeEach(func() { - needsCleanup = false - // Skip unsupported tests to avoid unnecessary resource initialization - skipUnsupportedTest(t, driver, pattern) - needsCleanup = true - - // Setup test resource for driver and testpattern - resource = genericVolumeTestResource{} - resource.setupResource(driver, pattern) - - // Create test input - input = createVolumesTestInput(pattern, resource) - }) - - AfterEach(func() { - if needsCleanup { - resource.cleanupResource(driver, pattern) - } - }) - - testVolumes(&input) + } + config := convertTestConfig(config) + framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent) + var fsGroup *int64 + if dInfo.Capabilities[CapFsGroup] { + fsGroupVal := int64(1234) + fsGroup = &fsGroupVal + } + framework.TestVolumeClient(f.ClientSet, config, fsGroup, pattern.FsType, tests) }) -} -type volumesTestInput struct { - f *framework.Framework - name string - config *TestConfig - fsGroup *int64 - fsType string - tests 
[]framework.VolumeTest - resource genericVolumeTestResource -} - -func testVolumes(input *volumesTestInput) { - It("should be mountable", func() { - f := input.f - cs := f.ClientSet - defer framework.VolumeTestCleanup(f, convertTestConfig(input.config)) - - skipPersistenceTest(input.resource.driver) - - volumeTest := input.tests - config := convertTestConfig(input.config) - framework.InjectHtml(cs, config, volumeTest[0].Volume, volumeTest[0].ExpectedContent) - framework.TestVolumeClient(cs, config, input.fsGroup, input.fsType, input.tests) - }) It("should allow exec of files on the volume", func() { - f := input.f - skipExecTest(input.resource.driver) + skipExecTest(driver) + init() + defer cleanup() - testScriptInPod(f, input.resource.volType, input.resource.volSource, input.resource.driver.GetDriverInfo().Config.ClientNodeSelector) + testScriptInPod(f, resource.volType, resource.volSource, config.ClientNodeSelector) }) } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 7e2cb745675..701cd2da404 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -212,21 +212,22 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop action := "creating claims with class with waitForFirstConsumer" suffix := "delayed" var topoZone string - class := newStorageClass(test, ns, suffix) + test.Client = c + test.Class = newStorageClass(test, ns, suffix) if specifyAllowedTopology { action += " and allowedTopologies" suffix += "-topo" topoZone = getRandomClusterZone(c) - addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone) + addSingleZoneAllowedTopologyToStorageClass(c, test.Class, topoZone) } By(action) var claims []*v1.PersistentVolumeClaim for i := 0; i < pvcCount; i++ { claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name + claim.Spec.StorageClassName = &test.Class.Name claims = append(claims, claim) } - pvs, node := testsuites.TestBindingWaitForFirstConsumerMultiPVC(test, c, claims, class, nil /* node selector */, false /* expect unschedulable */) + pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } @@ -440,10 +441,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { By("Testing " + test.Name) suffix := fmt.Sprintf("%d", i) - class := newStorageClass(test, ns, suffix) - claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name - testsuites.TestDynamicProvisioning(test, c, claim, class) + test.Client = c + test.Class = newStorageClass(test, ns, suffix) + test.Claim = newClaim(test, ns, suffix) + test.Claim.Spec.StorageClassName = &test.Class.Name + test.TestDynamicProvisioning() } // Run the last test with storage.k8s.io/v1beta1 on pvc @@ -455,9 +457,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Expect(err).NotTo(HaveOccurred()) defer deleteStorageClass(c, class.Name) - claim := newClaim(*betaTest, ns, "beta") - claim.Spec.StorageClassName = &(class.Name) - testsuites.TestDynamicProvisioning(*betaTest, c, claim, nil) + betaTest.Client = c + betaTest.Class = nil + betaTest.Claim = newClaim(*betaTest, ns, "beta") + betaTest.Claim.Spec.StorageClassName = &(class.Name) + (*betaTest).TestDynamicProvisioning() } }) @@ -465,6 +469,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("gce", "gke") test := 
testsuites.StorageClassTest{ + Client: c, Name: "HDD PD on GCE/GKE", CloudProviders: []string{"gce", "gke"}, Provisioner: "kubernetes.io/gce-pd", @@ -479,12 +484,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, testsuites.NodeSelection{}) }, } - class := newStorageClass(test, ns, "reclaimpolicy") + test.Class = newStorageClass(test, ns, "reclaimpolicy") retain := v1.PersistentVolumeReclaimRetain - class.ReclaimPolicy = &retain - claim := newClaim(test, ns, "reclaimpolicy") - claim.Spec.StorageClassName = &class.Name - pv := testsuites.TestDynamicProvisioning(test, c, claim, class) + test.Class.ReclaimPolicy = &retain + test.Claim = newClaim(test, ns, "reclaimpolicy") + test.Claim.Spec.StorageClassName = &test.Class.Name + pv := test.TestDynamicProvisioning() By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) @@ -718,17 +723,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { By("creating a StorageClass") test := testsuites.StorageClassTest{ + Client: c, Name: "external provisioner test", Provisioner: externalPluginName, ClaimSize: "1500Mi", ExpectedSize: "1500Mi", } - class := newStorageClass(test, ns, "external") - claim := newClaim(test, ns, "external") - claim.Spec.StorageClassName = &(class.Name) + test.Class = newStorageClass(test, ns, "external") + test.Claim = newClaim(test, ns, "external") + test.Claim.Spec.StorageClassName = &test.Class.Name By("creating a claim with a external provisioning annotation") - testsuites.TestDynamicProvisioning(test, c, claim, class) + test.TestDynamicProvisioning() }) }) @@ -738,13 +744,14 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { By("creating a claim with no annotation") test := testsuites.StorageClassTest{ + Client: c, Name: "default", ClaimSize: "2Gi", ExpectedSize: "2Gi", } - claim := newClaim(test, ns, "default") - testsuites.TestDynamicProvisioning(test, c, claim, nil) + test.Claim = newClaim(test, ns, "default") + test.TestDynamicProvisioning() }) // Modifying the default storage class can be disruptive to other tests that depend on it @@ -817,6 +824,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { serverUrl := "http://" + pod.Status.PodIP + ":8081" By("creating a StorageClass") test := testsuites.StorageClassTest{ + Client: c, Name: "Gluster Dynamic provisioner test", Provisioner: "kubernetes.io/glusterfs", ClaimSize: "2Gi", @@ -824,13 +832,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { Parameters: map[string]string{"resturl": serverUrl}, } suffix := fmt.Sprintf("glusterdptest") - class := newStorageClass(test, ns, suffix) + test.Class = newStorageClass(test, ns, suffix) By("creating a claim object with a suffix for gluster dynamic provisioner") - claim := newClaim(test, ns, suffix) - claim.Spec.StorageClassName = &class.Name + test.Claim = newClaim(test, ns, suffix) + test.Claim.Spec.StorageClassName = &test.Class.Name - testsuites.TestDynamicProvisioning(test, c, claim, class) + test.TestDynamicProvisioning() }) }) @@ -929,12 +937,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } By("creating a claim with class with allowedTopologies set") suffix := "topology" - class := newStorageClass(test, ns, suffix) + test.Client = c + test.Class = newStorageClass(test, ns, suffix) zone := getRandomClusterZone(c) - 
addSingleZoneAllowedTopologyToStorageClass(c, class, zone)
-		claim := newClaim(test, ns, suffix)
-		claim.Spec.StorageClassName = &class.Name
-		pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
+			addSingleZoneAllowedTopologyToStorageClass(c, test.Class, zone)
+			test.Claim = newClaim(test, ns, suffix)
+			test.Claim.Spec.StorageClassName = &test.Class.Name
+			pv := test.TestDynamicProvisioning()
 			checkZoneFromLabelAndAffinity(pv, zone, true)
 		}
 	})

From 46436240847abdf5dca0b26010d9ed685b5eecfa Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Wed, 13 Feb 2019 20:46:49 +0100
Subject: [PATCH 3/5] e2e/storage: remove test pattern filtering

The recommended approach for not running unsuitable tests is to skip
them at runtime with an explanation. Filtering out unsuitable test
patterns and thus not even defining unsuitable tests was done earlier
because it was faster than skipping tests at runtime. But now these
tests can be skipped efficiently, so this special case can be removed.
---
 test/e2e/storage/csi_volumes.go     | 17 +----------------
 test/e2e/storage/in_tree_volumes.go |  7 +------
 test/e2e/storage/testsuites/base.go |  6 ++----
 3 files changed, 4 insertions(+), 26 deletions(-)

diff --git a/test/e2e/storage/csi_volumes.go b/test/e2e/storage/csi_volumes.go
index f6bb547135a..5ddd5972909 100644
--- a/test/e2e/storage/csi_volumes.go
+++ b/test/e2e/storage/csi_volumes.go
@@ -31,7 +31,6 @@ import (
 	csiclient "k8s.io/csi-api/pkg/client/clientset/versioned"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
-	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -62,27 +61,13 @@ var csiTestSuites = []func() testsuites.TestSuite{
 	testsuites.InitSnapshottableTestSuite,
 }
 
-func csiTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
-	tunedPatterns := []testpatterns.TestPattern{}
-
-	for _, pattern := range patterns {
-		// Skip inline volume and pre-provsioned PV tests for csi drivers
-		if pattern.VolType == testpatterns.InlineVolume || pattern.VolType == testpatterns.PreprovisionedPV {
-			continue
-		}
-		tunedPatterns = append(tunedPatterns, pattern)
-	}
-
-	return tunedPatterns
-}
-
 // This executes testSuites for csi volumes.
 var _ = utils.SIGDescribe("CSI Volumes", func() {
 	for _, initDriver := range csiTestDrivers {
 		curDriver := initDriver()
 
 		Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
-			testsuites.DefineTestSuite(curDriver, csiTestSuites, csiTunePattern)
+			testsuites.DefineTestSuite(curDriver, csiTestSuites)
 		})
 	}
 
diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go
index f9b7c97901d..23ceaa3c860 100644
--- a/test/e2e/storage/in_tree_volumes.go
+++ b/test/e2e/storage/in_tree_volumes.go
@@ -19,7 +19,6 @@ package storage
 import (
 	. "github.com/onsi/ginkgo"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
-	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -58,17 +57,13 @@ var testSuites = []func() testsuites.TestSuite{
 	testsuites.InitProvisioningTestSuite,
 }
 
-func intreeTunePattern(patterns []testpatterns.TestPattern) []testpatterns.TestPattern {
-	return patterns
-}
-
 // This executes testSuites for in-tree volumes.
var _ = utils.SIGDescribe("In-tree Volumes", func() { for _, initDriver := range testDrivers { curDriver := initDriver() Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() { - testsuites.DefineTestSuite(curDriver, testSuites, intreeTunePattern) + testsuites.DefineTestSuite(curDriver, testSuites) }) } }) diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index 96e2ff16674..85dd8347c6f 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -67,12 +67,10 @@ func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string { } // DefineTestSuite defines tests for all testpatterns and all testSuites for a driver -func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite, tunePatternFunc func([]testpatterns.TestPattern) []testpatterns.TestPattern) { +func DefineTestSuite(driver TestDriver, tsInits []func() TestSuite) { for _, testSuiteInit := range tsInits { suite := testSuiteInit() - patterns := tunePatternFunc(suite.getTestSuiteInfo().testPatterns) - - for _, pattern := range patterns { + for _, pattern := range suite.getTestSuiteInfo().testPatterns { p := pattern Context(getTestNameStr(suite, p), func() { BeforeEach(func() { From e79cd9efe1bbb213c9885d2e5f6b52baa5a74532 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Tue, 12 Feb 2019 21:53:49 +0100 Subject: [PATCH 4/5] e2e: update bazel BUILD files Generated via hack/update-bazel.sh. --- test/e2e/storage/BUILD | 2 -- test/e2e/storage/testsuites/BUILD | 1 + 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index 8385ca37676..7ecc8bf88c3 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -67,11 +67,9 @@ go_library( "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library", "//test/e2e/framework:go_default_library", "//test/e2e/framework/metrics:go_default_library", - "//test/e2e/framework/podlogs:go_default_library", "//test/e2e/framework/providers/gce:go_default_library", "//test/e2e/framework/testfiles:go_default_library", "//test/e2e/storage/drivers:go_default_library", - "//test/e2e/storage/testpatterns:go_default_library", "//test/e2e/storage/testsuites:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/utils/image:go_default_library", diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD index 9a692f0a435..e6a0221a404 100644 --- a/test/e2e/storage/testsuites/BUILD +++ b/test/e2e/storage/testsuites/BUILD @@ -31,6 +31,7 @@ go_library( "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/podlogs:go_default_library", "//test/e2e/storage/testpatterns:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/utils/image:go_default_library", From ec3655a1d40ced6b1873e627b736aae1cf242477 Mon Sep 17 00:00:00 2001 From: Patrick Ohly Date: Fri, 15 Feb 2019 10:50:18 +0100 Subject: [PATCH 5/5] e2e/storage: re-initialize all local variables There is a risk that the init function does not reset one of the local variables that was set by a previous test. To avoid this, all variables set by init are now in a struct which gets cleaned completely first. 
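For illustration, the resulting pattern looks roughly like this (a minimal
sketch distilled from the suites changed below; the concrete fields vary per
suite):

    type local struct {
        config      *PerTestConfig
        testCleanup func()
        // ...whatever other per-test state the suite needs...
    }
    var l local

    init := func() {
        // Assigning a fresh zero value resets every field at once,
        // including any field that a previous test case set and that a
        // hand-written per-variable reset might forget.
        l = local{}
        l.config, l.testCleanup = driver.PrepareTest(f)
    }

Each It callback calls init() first and defers the matching cleanup, so no
state can leak from one test case into the next.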
--- test/e2e/storage/testsuites/provisioning.go | 92 +++++----- test/e2e/storage/testsuites/subpath.go | 191 ++++++++++---------- test/e2e/storage/testsuites/volume_io.go | 32 ++-- test/e2e/storage/testsuites/volumemode.go | 142 ++++++++------- test/e2e/storage/testsuites/volumes.go | 38 ++-- 5 files changed, 255 insertions(+), 240 deletions(-) diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index 6153aab6603..9bca03aa78d 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -80,15 +80,19 @@ func (p *provisioningTestSuite) getTestSuiteInfo() TestSuiteInfo { } func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) { - var ( - dInfo = driver.GetDriverInfo() - dDriver DynamicPVTestDriver + type local struct { config *PerTestConfig testCleanup func() - testCase *StorageClassTest - cs clientset.Interface - pvc *v1.PersistentVolumeClaim - sc *storage.StorageClass + + testCase *StorageClassTest + cs clientset.Interface + pvc *v1.PersistentVolumeClaim + sc *storage.StorageClass + } + var ( + dInfo = driver.GetDriverInfo() + dDriver DynamicPVTestDriver + l local ) BeforeEach(func() { @@ -110,30 +114,32 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte f := framework.NewDefaultFramework("provisioning") init := func() { + l = local{} + // Now do the more expensive test initialization. - config, testCleanup = driver.PrepareTest(f) - cs = config.Framework.ClientSet + l.config, l.testCleanup = driver.PrepareTest(f) + l.cs = l.config.Framework.ClientSet claimSize := dDriver.GetClaimSize() - sc = dDriver.GetDynamicProvisionStorageClass(config, "") - if sc == nil { + l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, "") + if l.sc == nil { framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) } - pvc = getClaim(claimSize, config.Framework.Namespace.Name) - pvc.Spec.StorageClassName = &sc.Name - framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", sc, pvc) - testCase = &StorageClassTest{ - Client: config.Framework.ClientSet, - Claim: pvc, - Class: sc, + l.pvc = getClaim(claimSize, l.config.Framework.Namespace.Name) + l.pvc.Spec.StorageClassName = &l.sc.Name + framework.Logf("In creating storage class object and pvc object for driver - sc: %v, pvc: %v", l.sc, l.pvc) + l.testCase = &StorageClassTest{ + Client: l.config.Framework.ClientSet, + Claim: l.pvc, + Class: l.sc, ClaimSize: claimSize, ExpectedSize: claimSize, } } cleanup := func() { - if testCleanup != nil { - testCleanup() - testCleanup = nil + if l.testCleanup != nil { + l.testCleanup() + l.testCleanup = nil } } @@ -141,7 +147,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte init() defer cleanup() - testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning() }) It("should provision storage with mount options", func() { @@ -152,8 +158,8 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte init() defer cleanup() - testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List() - testCase.TestDynamicProvisioning() + l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List() + l.testCase.TestDynamicProvisioning() }) It("should access volume from different nodes", func() { @@ -164,19 +170,19 @@ func (p *provisioningTestSuite) 
defineTests(driver TestDriver, pattern testpatte // locked onto a single node, then the driver is // usable on all of them *and* supports accessing a volume // from any node. - if config.ClientNodeName != "" { + if l.config.ClientNodeName != "" { framework.Skipf("Driver %q only supports testing on one node - skipping", dInfo.Name) } // Ensure that we actually have more than one node. - nodes := framework.GetReadySchedulableNodesOrDie(cs) + nodes := framework.GetReadySchedulableNodesOrDie(l.cs) if len(nodes.Items) <= 1 { framework.Skipf("need more than one node - skipping") } - testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { - PVMultiNodeCheck(cs, claim, volume, NodeSelection{Name: config.ClientNodeName}) + l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + PVMultiNodeCheck(l.cs, claim, volume, NodeSelection{Name: l.config.ClientNodeName}) } - testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning() }) It("should create and delete block persistent volumes", func() { @@ -188,9 +194,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte defer cleanup() block := v1.PersistentVolumeBlock - testCase.VolumeMode = &block - pvc.Spec.VolumeMode = &block - testCase.TestDynamicProvisioning() + l.testCase.VolumeMode = &block + l.pvc.Spec.VolumeMode = &block + l.testCase.TestDynamicProvisioning() }) It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { @@ -206,18 +212,18 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte init() defer cleanup() - dc := config.Framework.DynamicClient - vsc := sDriver.GetSnapshotClass(config) - dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: config.ClientNodeName}, cs, dc, pvc, sc, vsc) + dc := l.config.Framework.DynamicClient + vsc := sDriver.GetSnapshotClass(l.config) + dataSource, cleanupFunc := prepareDataSourceForProvisioning(NodeSelection{Name: l.config.ClientNodeName}, l.cs, dc, l.pvc, l.sc, vsc) defer cleanupFunc() - pvc.Spec.DataSource = dataSource - testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + l.pvc.Spec.DataSource = dataSource + l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { By("checking whether the created volume has the pre-populated data") command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace) - RunInPodWithVolume(cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: config.ClientNodeName}) + RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, NodeSelection{Name: l.config.ClientNodeName}) } - testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning() }) It("should allow concurrent writes on the same node", func() { @@ -228,7 +234,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte init() defer cleanup() - testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { + l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) { // We start two pods concurrently on the same node, // using the same PVC. Both wait for other to create a // file before returning. 
diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go
index 1d36a0efbaa..adfb87e78aa 100644
--- a/test/e2e/storage/testsuites/subpath.go
+++ b/test/e2e/storage/testsuites/subpath.go
@@ -26,7 +26,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -73,10 +72,10 @@ func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo {
 }
 
 func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-	var (
-		config      *PerTestConfig
-		testCleanup func()
-		cs          clientset.Interface
+	type local struct {
+		config      *PerTestConfig
+		testCleanup func()
 
+		resource          *genericVolumeTestResource
 		roVolSource       *v1.VolumeSource
 		pod               *v1.Pod
@@ -84,7 +83,8 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		subPathDir        string
 		filePathInSubpath string
 		filePathInVolume  string
-	)
+	}
+	var l local
 
 	// No preconditions to test. Normally they would be in a BeforeEach here.
 
@@ -95,33 +95,30 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 	f := framework.NewDefaultFramework("provisioning")
 
 	init := func() {
-		cs = f.ClientSet // needed for cleanup, f.ClientSet itself gets reset too early
+		l = local{}
 
 		// Now do the more expensive test initialization.
-		config, testCleanup = driver.PrepareTest(f)
-		fsType := pattern.FsType
-		volType := pattern.VolType
-
-		resource = createGenericVolumeTestResource(driver, config, pattern)
+		l.config, l.testCleanup = driver.PrepareTest(f)
+		l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
 
 		// Setup subPath test dependent resource
-		roVolSource = nil
+		volType := pattern.VolType
 		switch volType {
 		case testpatterns.InlineVolume:
 			if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
-				roVolSource = iDriver.GetVolumeSource(true, fsType, resource.volume)
+				l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.volume)
 			}
 		case testpatterns.PreprovisionedPV:
-			roVolSource = &v1.VolumeSource{
+			l.roVolSource = &v1.VolumeSource{
 				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-					ClaimName: resource.pvc.Name,
+					ClaimName: l.resource.pvc.Name,
 					ReadOnly:  true,
 				},
 			}
 		case testpatterns.DynamicPV:
-			roVolSource = &v1.VolumeSource{
+			l.roVolSource = &v1.VolumeSource{
 				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-					ClaimName: resource.pvc.Name,
+					ClaimName: l.resource.pvc.Name,
 					ReadOnly:  true,
 				},
 			}
@@ -130,35 +127,35 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		}
 
 		subPath := f.Namespace.Name
-		pod = SubpathTestPod(f, subPath, resource.volType, resource.volSource, true)
-		pod.Spec.NodeName = config.ClientNodeName
-		pod.Spec.NodeSelector = config.ClientNodeSelector
+		l.pod = SubpathTestPod(f, subPath, l.resource.volType, l.resource.volSource, true)
+		l.pod.Spec.NodeName = l.config.ClientNodeName
+		l.pod.Spec.NodeSelector = l.config.ClientNodeSelector
 
-		formatPod = volumeFormatPod(f, resource.volSource)
-		formatPod.Spec.NodeName = config.ClientNodeName
-		formatPod.Spec.NodeSelector = config.ClientNodeSelector
+		l.formatPod = volumeFormatPod(f, l.resource.volSource)
+		l.formatPod.Spec.NodeName = l.config.ClientNodeName
+		l.formatPod.Spec.NodeSelector = l.config.ClientNodeSelector
 
-		subPathDir = filepath.Join(volumePath, subPath)
-		filePathInSubpath = filepath.Join(volumePath, fileName)
-		filePathInVolume = filepath.Join(subPathDir, fileName)
+		l.subPathDir = filepath.Join(volumePath, subPath)
+		l.filePathInSubpath = filepath.Join(volumePath, fileName)
+		l.filePathInVolume = filepath.Join(l.subPathDir, fileName)
 	}
 
 	cleanup := func() {
-		if pod != nil {
+		if l.pod != nil {
 			By("Deleting pod")
-			err := framework.DeletePodWithWait(f, cs, pod)
+			err := framework.DeletePodWithWait(f, f.ClientSet, l.pod)
 			Expect(err).ToNot(HaveOccurred(), "while deleting pod")
-			pod = nil
+			l.pod = nil
 		}
 
-		if resource != nil {
-			resource.cleanupResource()
-			resource = nil
+		if l.resource != nil {
+			l.resource.cleanupResource()
+			l.resource = nil
 		}
 
-		if testCleanup != nil {
-			testCleanup()
-			testCleanup = nil
+		if l.testCleanup != nil {
+			l.testCleanup()
+			l.testCleanup = nil
 		}
 	}
 
@@ -167,10 +164,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Write the file in the subPath from init container 1
-		setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
+		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
 
 		// Read it from outside the subPath from container 1
-		testReadFile(f, filePathInVolume, pod, 1)
+		testReadFile(f, l.filePathInVolume, l.pod, 1)
 	})
 
 	It("should support existing directory", func() {
@@ -178,13 +175,13 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the directory
-		setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
 
 		// Write the file in the subPath from init container 1
-		setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
+		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
 
 		// Read it from outside the subPath from container 1
-		testReadFile(f, filePathInVolume, pod, 1)
+		testReadFile(f, l.filePathInVolume, l.pod, 1)
 	})
 
 	It("should support existing single file", func() {
@@ -192,10 +189,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the file in the init container
-		setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume))
+		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", l.subPathDir, l.filePathInVolume))
 
 		// Read it from inside the subPath from container 0
-		testReadFile(f, filePathInSubpath, pod, 0)
+		testReadFile(f, l.filePathInSubpath, l.pod, 0)
 	})
 
 	It("should support file as subpath", func() {
@@ -203,9 +200,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the file in the init container
-		setInitCommand(pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, l.subPathDir))
 
-		TestBasicSubpath(f, f.Namespace.Name, pod)
+		TestBasicSubpath(f, f.Namespace.Name, l.pod)
 	})
 
 	It("should fail if subpath directory is outside the volume [Slow]", func() {
@@ -213,10 +210,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the subpath outside the volume
-		setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("ln -s /bin %s", l.subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(f, pod, false)
+		testPodFailSubpath(f, l.pod, false)
 	})
 
 	It("should fail if subpath file is outside the volume [Slow]", func() {
@@ -224,10 +221,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the subpath outside the volume
-		setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/sh %s", l.subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(f, pod, false)
+		testPodFailSubpath(f, l.pod, false)
 	})
 
 	It("should fail if non-existent subpath is outside the volume [Slow]", func() {
@@ -235,10 +232,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the subpath outside the volume
-		setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", l.subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(f, pod, false)
+		testPodFailSubpath(f, l.pod, false)
 	})
 
 	It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
@@ -246,10 +243,10 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the subpath outside the volume
-		setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("ln -s ../ %s", l.subPathDir))
 
 		// Pod should fail
-		testPodFailSubpath(f, pod, false)
+		testPodFailSubpath(f, l.pod, false)
 	})
 
 	It("should support creating multiple subpath from same volumes [Slow]", func() {
@@ -260,22 +257,22 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		subpathDir2 := filepath.Join(volumePath, "subpath2")
 		filepath1 := filepath.Join("/test-subpath1", fileName)
 		filepath2 := filepath.Join("/test-subpath2", fileName)
-		setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
+		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
 
-		addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
+		addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
 			Name:      volumeName,
 			MountPath: "/test-subpath1",
 			SubPath:   "subpath1",
 		})
-		addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
+		addSubpathVolumeContainer(&l.pod.Spec.Containers[0], v1.VolumeMount{
 			Name:      volumeName,
 			MountPath: "/test-subpath2",
 			SubPath:   "subpath2",
 		})
 
 		// Write the files from container 0 and instantly read them back
-		addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2)
-		testMultipleReads(f, pod, 0, filepath1, filepath2)
+		addMultipleWrites(&l.pod.Spec.Containers[0], filepath1, filepath2)
+		testMultipleReads(f, l.pod, 0, filepath1, filepath2)
 	})
 
 	It("should support restarting containers using directory as subpath [Slow]", func() {
@@ -283,9 +280,9 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the directory
-		setInitCommand(pod, fmt.Sprintf("mkdir -p %v; touch %v", subPathDir, probeFilePath))
+		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %v; touch %v", l.subPathDir, probeFilePath))
 
-		testPodContainerRestart(f, pod)
+		testPodContainerRestart(f, l.pod)
 	})
 
 	It("should support restarting containers using file as subpath [Slow]", func() {
@@ -293,28 +290,28 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the file
-		setInitCommand(pod, fmt.Sprintf("touch %v; touch %v", subPathDir, probeFilePath))
+		setInitCommand(l.pod, fmt.Sprintf("touch %v; touch %v", l.subPathDir, probeFilePath))
 
-		testPodContainerRestart(f, pod)
+		testPodContainerRestart(f, l.pod)
 	})
 
 	It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
 		init()
 		defer cleanup()
 
-		testSubpathReconstruction(f, pod, false)
+		testSubpathReconstruction(f, l.pod, false)
 	})
 
 	It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
 		init()
 		defer cleanup()
 
-		if strings.HasPrefix(resource.volType, "hostPath") || strings.HasPrefix(resource.volType, "csi-hostpath") {
+		if strings.HasPrefix(l.resource.volType, "hostPath") || strings.HasPrefix(l.resource.volType, "csi-hostpath") {
 			// TODO: This skip should be removed once #61446 is fixed
-			framework.Skipf("%s volume type does not support reconstruction, skipping", resource.volType)
+			framework.Skipf("%s volume type does not support reconstruction, skipping", l.resource.volType)
 		}
-		testSubpathReconstruction(f, pod, true)
+		testSubpathReconstruction(f, l.pod, true)
 	})
 
 	It("should support readOnly directory specified in the volumeMount", func() {
@@ -322,14 +319,14 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the directory
-		setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
 
 		// Write the file in the volume from init container 2
-		setWriteCommand(filePathInVolume, &pod.Spec.InitContainers[2])
+		setWriteCommand(l.filePathInVolume, &l.pod.Spec.InitContainers[2])
 
 		// Read it from inside the subPath from container 0
-		pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
-		testReadFile(f, filePathInSubpath, pod, 0)
+		l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
+		testReadFile(f, l.filePathInSubpath, l.pod, 0)
 	})
 
 	It("should support readOnly file specified in the volumeMount", func() {
@@ -337,62 +334,62 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Create the file
-		setInitCommand(pod, fmt.Sprintf("touch %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("touch %s", l.subPathDir))
 
 		// Write the file in the volume from init container 2
-		setWriteCommand(subPathDir, &pod.Spec.InitContainers[2])
+		setWriteCommand(l.subPathDir, &l.pod.Spec.InitContainers[2])
 
 		// Read it from inside the subPath from container 0
-		pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
-		testReadFile(f, volumePath, pod, 0)
+		l.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true
+		testReadFile(f, volumePath, l.pod, 0)
 	})
 
 	It("should support existing directories when readOnly specified in the volumeSource", func() {
 		init()
 		defer cleanup()
-		if roVolSource == nil {
-			framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
+		if l.roVolSource == nil {
+			framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
 		}
 
-		origpod := pod.DeepCopy()
+		origpod := l.pod.DeepCopy()
 
 		// Create the directory
-		setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
+		setInitCommand(l.pod, fmt.Sprintf("mkdir -p %s", l.subPathDir))
 
 		// Write the file in the subPath from init container 1
-		setWriteCommand(filePathInSubpath, &pod.Spec.InitContainers[1])
+		setWriteCommand(l.filePathInSubpath, &l.pod.Spec.InitContainers[1])
 
 		// Read it from inside the subPath from container 0
-		testReadFile(f, filePathInSubpath, pod, 0)
+		testReadFile(f, l.filePathInSubpath, l.pod, 0)
 
 		// Reset the pod
-		pod = origpod
+		l.pod = origpod
 
 		// Set volume source to read only
-		pod.Spec.Volumes[0].VolumeSource = *roVolSource
+		l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
 
 		// Read it from inside the subPath from container 0
-		testReadFile(f, filePathInSubpath, pod, 0)
+		testReadFile(f, l.filePathInSubpath, l.pod, 0)
 	})
 
 	It("should verify container cannot write to subpath readonly volumes", func() {
 		init()
 		defer cleanup()
-		if roVolSource == nil {
-			framework.Skipf("Volume type %v doesn't support readOnly source", resource.volType)
+		if l.roVolSource == nil {
+			framework.Skipf("Volume type %v doesn't support readOnly source", l.resource.volType)
 		}
 
 		// Format the volume while it's writable
-		formatVolume(f, formatPod)
+		formatVolume(f, l.formatPod)
 
 		// Set volume source to read only
-		pod.Spec.Volumes[0].VolumeSource = *roVolSource
+		l.pod.Spec.Volumes[0].VolumeSource = *l.roVolSource
 
 		// Write the file in the volume from container 0
-		setWriteCommand(subPathDir, &pod.Spec.Containers[0])
+		setWriteCommand(l.subPathDir, &l.pod.Spec.Containers[0])
 
 		// Pod should fail
-		testPodFailSubpath(f, pod, true)
+		testPodFailSubpath(f, l.pod, true)
 	})
 
 	It("should be able to unmount after the subpath directory is deleted", func() {
@@ -400,12 +397,12 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		defer cleanup()
 
 		// Change volume container to busybox so we can exec later
-		pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
-		pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+		l.pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox)
+		l.pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
 
-		By(fmt.Sprintf("Creating pod %s", pod.Name))
-		removeUnusedContainers(pod)
-		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		By(fmt.Sprintf("Creating pod %s", l.pod.Name))
+		removeUnusedContainers(l.pod)
+		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(l.pod)
 		Expect(err).ToNot(HaveOccurred(), "while creating pod")
 		defer func() {
 			By(fmt.Sprintf("Deleting pod %s", pod.Name))
@@ -413,12 +410,12 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		}()
 
 		// Wait for pod to be running
-		err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod)
+		err = framework.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
 		Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running")
 
 		// Exec into container that mounted the volume, delete subpath directory
-		rmCmd := fmt.Sprintf("rm -rf %s", subPathDir)
-		_, err = podContainerExec(pod, 1, rmCmd)
+		rmCmd := fmt.Sprintf("rm -rf %s", l.subPathDir)
+		_, err = podContainerExec(l.pod, 1, rmCmd)
 		Expect(err).ToNot(HaveOccurred(), "while removing subpath directory")
 
 		// Delete pod (from defer) and wait for it to be successfully deleted
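The subpath.go change shows the pattern used throughout this series: per-test variables move from a shared var block into a `local` struct, and init() starts again from the zero value, so nothing can leak from one It into the next. A standalone sketch of that pattern (toy fields, not the suite's real state):

package main

import "fmt"

// local holds everything one test needs. Re-assigning the zero value in
// init guarantees a clean slate, which is easy to get wrong when each of
// a dozen individual variables has to be reset by hand.
type local struct {
	config   string
	cleanups []func()
}

func main() {
	var l local

	init := func() {
		l = local{} // wipe all state from the previous test
		l.config = "per-test config"
	}
	cleanup := func() {
		for _, c := range l.cleanups {
			c()
		}
		l.cleanups = nil
	}

	for i := 0; i < 2; i++ {
		init()
		l.cleanups = append(l.cleanups, func() { fmt.Println("cleaned up", l.config) })
		cleanup()
	}
}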
diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go
index ddcda3eda61..1ec29ba5a07 100644
--- a/test/e2e/storage/testsuites/volume_io.go
+++ b/test/e2e/storage/testsuites/volume_io.go
@@ -75,11 +75,15 @@ func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo {
 }
 
 func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-	var (
-		dInfo = driver.GetDriverInfo()
+	type local struct {
 		config      *PerTestConfig
 		testCleanup func()
-		resource *genericVolumeTestResource
+
+		resource *genericVolumeTestResource
+	}
+	var (
+		dInfo = driver.GetDriverInfo()
+		l     local
 	)
 
 	// No preconditions to test. Normally they would be in a BeforeEach here.
@@ -91,23 +95,25 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
 	f := framework.NewDefaultFramework("volumeio")
 
 	init := func() {
+		l = local{}
+
 		// Now do the more expensive test initialization.
-		config, testCleanup = driver.PrepareTest(f)
-		resource = createGenericVolumeTestResource(driver, config, pattern)
-		if resource.volSource == nil {
+		l.config, l.testCleanup = driver.PrepareTest(f)
+		l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
+		if l.resource.volSource == nil {
 			framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
 		}
 	}
 
 	cleanup := func() {
-		if resource != nil {
-			resource.cleanupResource()
-			resource = nil
+		if l.resource != nil {
+			l.resource.cleanupResource()
+			l.resource = nil
 		}
 
-		if testCleanup != nil {
-			testCleanup()
-			testCleanup = nil
+		if l.testCleanup != nil {
+			l.testCleanup()
+			l.testCleanup = nil
 		}
 	}
 
@@ -126,7 +132,7 @@ func (t *volumeIOTestSuite) defineTests(driver TestDriver, pattern testpatterns.
 		podSec := v1.PodSecurityContext{
 			FSGroup: fsGroup,
 		}
-		err := testVolumeIO(f, cs, convertTestConfig(config), *resource.volSource, &podSec, testFile, fileSizes)
+		err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.volSource, &podSec, testFile, fileSizes)
 		Expect(err).NotTo(HaveOccurred())
 	})
 }
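volume_io.go follows the same shape, with one extra wrinkle: init() may skip the test when the driver cannot supply a volume source, so capability checks fail soft instead of hard. A rough standalone analogue using the standard testing package (the toy driver type is invented for illustration; the real suite uses TestDriver):

package volumeio_sketch

import "testing"

// driver is a stand-in for a storage driver under test; a nil volSource
// means the driver cannot provide an inline volume source.
type driver struct {
	name      string
	volSource *string
}

// initResources mirrors the suite's init(): skip, rather than fail, when a
// required capability is missing.
func initResources(t *testing.T, d driver) *string {
	if d.volSource == nil {
		t.Skipf("Driver %q does not define volumeSource - skipping", d.name)
	}
	return d.volSource
}

func TestVolumeIOSketch(t *testing.T) {
	src := "fake-volume-source"
	for _, d := range []driver{{"no-source-driver", nil}, {"good-driver", &src}} {
		t.Run(d.name, func(t *testing.T) {
			vs := initResources(t, d)
			_ = vs // a real test would mount and exercise the volume here
		})
	}
}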
diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go
index 45631703621..b90fddb9b44 100644
--- a/test/e2e/storage/testsuites/volumemode.go
+++ b/test/e2e/storage/testsuites/volumemode.go
@@ -26,6 +26,7 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -62,14 +63,20 @@ func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo {
 }
 
 func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-	var (
-		dInfo = driver.GetDriverInfo()
+	type local struct {
 		config      *PerTestConfig
 		testCleanup func()
-		sc     *storagev1.StorageClass
-		pvc    *v1.PersistentVolumeClaim
-		pv     *v1.PersistentVolume
-		volume TestVolume
+
+		cs     clientset.Interface
+		ns     *v1.Namespace
+		sc     *storagev1.StorageClass
+		pvc    *v1.PersistentVolumeClaim
+		pv     *v1.PersistentVolume
+		volume TestVolume
+	}
+	var (
+		dInfo = driver.GetDriverInfo()
+		l     local
 	)
 
 	// No preconditions to test. Normally they would be in a BeforeEach here.
@@ -81,10 +88,13 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 	f := framework.NewDefaultFramework("volumemode")
 
 	init := func() {
-		// Now do the more expensive test initialization.
-		config, testCleanup = driver.PrepareTest(f)
+		l = local{}
+		l.ns = f.Namespace
+		l.cs = f.ClientSet
+
+		// Now do the more expensive test initialization.
+		l.config, l.testCleanup = driver.PrepareTest(f)
 
-		ns := f.Namespace
 		fsType := pattern.FsType
 		volBindMode := storagev1.VolumeBindingImmediate
@@ -95,38 +105,38 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		)
 
 		// Create volume for pre-provisioned volume tests
-		volume = CreateVolume(driver, config, pattern.VolType)
+		l.volume = CreateVolume(driver, l.config, pattern.VolType)
 
 		switch pattern.VolType {
 		case testpatterns.PreprovisionedPV:
 			if pattern.VolMode == v1.PersistentVolumeBlock {
-				scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name)
+				scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
 			} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
-				scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name)
+				scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
 			}
 			if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
-				pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, volume)
+				pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.volume)
 				if pvSource == nil {
 					framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
 				}
 
 				storageClass, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, pattern.VolMode, *pvSource, volumeNodeAffinity)
-				sc = storageClass
-				pv = framework.MakePersistentVolume(pvConfig)
-				pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name)
+				l.sc = storageClass
+				l.pv = framework.MakePersistentVolume(pvConfig)
+				l.pvc = framework.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
 			}
 		case testpatterns.DynamicPV:
 			if dDriver, ok := driver.(DynamicPVTestDriver); ok {
-				sc = dDriver.GetDynamicProvisionStorageClass(config, fsType)
-				if sc == nil {
+				l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
+				if l.sc == nil {
 					framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
 				}
-				sc.VolumeBindingMode = &volBindMode
+				l.sc.VolumeBindingMode = &volBindMode
 
 				claimSize := dDriver.GetClaimSize()
-				pvc = getClaim(claimSize, ns.Name)
-				pvc.Spec.StorageClassName = &sc.Name
-				pvc.Spec.VolumeMode = &pattern.VolMode
+				l.pvc = getClaim(claimSize, l.ns.Name)
+				l.pvc.Spec.StorageClassName = &l.sc.Name
+				l.pvc.Spec.VolumeMode = &pattern.VolMode
 			}
 		default:
 			framework.Failf("Volume mode test doesn't support: %s", pattern.VolType)
@@ -134,30 +144,30 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 	}
 
 	cleanup := func() {
-		if pv != nil || pvc != nil {
+		if l.pv != nil || l.pvc != nil {
 			By("Deleting pv and pvc")
-			errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, pv, pvc)
+			errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, l.pv, l.pvc)
 			if len(errs) > 0 {
 				framework.Logf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs))
 			}
-			pv = nil
-			pvc = nil
+			l.pv = nil
+			l.pvc = nil
 		}
 
-		if sc != nil {
+		if l.sc != nil {
 			By("Deleting sc")
-			deleteStorageClass(f.ClientSet, sc.Name)
-			sc = nil
+			deleteStorageClass(f.ClientSet, l.sc.Name)
+			l.sc = nil
 		}
 
-		if volume != nil {
-			volume.DeleteVolume()
-			volume = nil
+		if l.volume != nil {
+			l.volume.DeleteVolume()
+			l.volume = nil
 		}
 
-		if testCleanup != nil {
-			testCleanup()
-			testCleanup = nil
+		if l.testCleanup != nil {
+			l.testCleanup()
+			l.testCleanup = nil
 		}
 	}
 
@@ -170,31 +180,29 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		init()
 		defer cleanup()
 
-		cs := f.ClientSet
-		ns := f.Namespace
 		var err error
 
 		By("Creating sc")
-		sc, err = cs.StorageV1().StorageClasses().Create(sc)
+		l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Creating pv and pvc")
-		pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
+		l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
 		Expect(err).NotTo(HaveOccurred())
 
 		// Prebind pv
-		pvc.Spec.VolumeName = pv.Name
-		pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+		l.pvc.Spec.VolumeName = l.pv.Name
+		l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
 		Expect(err).NotTo(HaveOccurred())
 
-		framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
+		framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
 
 		By("Creating pod")
-		pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+		pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
 			false, "", false, false, framework.SELinuxLabel,
-			nil, config.ClientNodeName, framework.PodStartTimeout)
+			nil, l.config.ClientNodeName, framework.PodStartTimeout)
 		defer func() {
-			framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+			framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
 		}()
 		Expect(err).To(HaveOccurred())
 	})
@@ -203,31 +211,29 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		init()
 		defer cleanup()
 
-		cs := f.ClientSet
-		ns := f.Namespace
 		var err error
 
 		By("Creating sc")
-		sc, err = cs.StorageV1().StorageClasses().Create(sc)
+		l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Creating pv and pvc")
-		pv, err = cs.CoreV1().PersistentVolumes().Create(pv)
+		l.pv, err = l.cs.CoreV1().PersistentVolumes().Create(l.pv)
 		Expect(err).NotTo(HaveOccurred())
 
 		// Prebind pv
-		pvc.Spec.VolumeName = pv.Name
-		pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+		l.pvc.Spec.VolumeName = l.pv.Name
+		l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
 		Expect(err).NotTo(HaveOccurred())
 
-		framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, pv, pvc))
+		framework.ExpectNoError(framework.WaitOnPVandPVC(l.cs, l.ns.Name, l.pv, l.pvc))
 
 		By("Creating pod")
-		pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+		pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
 			false, "", false, false, framework.SELinuxLabel,
-			nil, config.ClientNodeName, framework.PodStartTimeout)
+			nil, l.config.ClientNodeName, framework.PodStartTimeout)
 		defer func() {
-			framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+			framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
 		}()
 		Expect(err).NotTo(HaveOccurred())
 
@@ -245,19 +251,17 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		init()
 		defer cleanup()
 
-		cs := f.ClientSet
-		ns := f.Namespace
 		var err error
 
 		By("Creating sc")
-		sc, err = cs.StorageV1().StorageClasses().Create(sc)
+		l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Creating pv and pvc")
-		pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+		l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
 		Expect(err).NotTo(HaveOccurred())
 
-		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
 		Expect(err).To(HaveOccurred())
 	})
 } else {
@@ -265,33 +269,31 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern
 		init()
 		defer cleanup()
 
-		cs := f.ClientSet
-		ns := f.Namespace
 		var err error
 
 		By("Creating sc")
-		sc, err = cs.StorageV1().StorageClasses().Create(sc)
+		l.sc, err = l.cs.StorageV1().StorageClasses().Create(l.sc)
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Creating pv and pvc")
-		pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(pvc)
+		l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(l.pvc)
 		Expect(err).NotTo(HaveOccurred())
 
-		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+		err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, l.cs, l.pvc.Namespace, l.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
 		Expect(err).NotTo(HaveOccurred())
 
-		pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
+		l.pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.pvc.Namespace).Get(l.pvc.Name, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
-		pv, err = cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{})
+		l.pv, err = l.cs.CoreV1().PersistentVolumes().Get(l.pvc.Spec.VolumeName, metav1.GetOptions{})
 		Expect(err).NotTo(HaveOccurred())
 
 		By("Creating pod")
-		pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{pvc},
+		pod, err := framework.CreateSecPodWithNodeName(l.cs, l.ns.Name, []*v1.PersistentVolumeClaim{l.pvc},
 			false, "", false, false, framework.SELinuxLabel,
-			nil, config.ClientNodeName, framework.PodStartTimeout)
+			nil, l.config.ClientNodeName, framework.PodStartTimeout)
 		defer func() {
-			framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod))
+			framework.ExpectNoError(framework.DeletePodWithWait(f, l.cs, pod))
 		}()
 		Expect(err).NotTo(HaveOccurred())
diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go
index d736f2e0c83..3820295dfa6 100644
--- a/test/e2e/storage/testsuites/volumes.go
+++ b/test/e2e/storage/testsuites/volumes.go
@@ -90,12 +90,14 @@ func skipExecTest(driver TestDriver) {
 }
 
 func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.TestPattern) {
-	var (
-		dInfo = driver.GetDriverInfo()
+	type local struct {
 		config      *PerTestConfig
 		testCleanup func()
-		resource *genericVolumeTestResource
-	)
+
+		resource *genericVolumeTestResource
+	}
+	var dInfo = driver.GetDriverInfo()
+	var l local
 
 	// No preconditions to test. Normally they would be in a BeforeEach here.
 
@@ -106,23 +108,25 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 	f := framework.NewDefaultFramework("volumeio")
 
 	init := func() {
+		l = local{}
+
 		// Now do the more expensive test initialization.
-		config, testCleanup = driver.PrepareTest(f)
-		resource = createGenericVolumeTestResource(driver, config, pattern)
-		if resource.volSource == nil {
+		l.config, l.testCleanup = driver.PrepareTest(f)
+		l.resource = createGenericVolumeTestResource(driver, l.config, pattern)
+		if l.resource.volSource == nil {
 			framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
 		}
 	}
 
 	cleanup := func() {
-		if resource != nil {
-			resource.cleanupResource()
-			resource = nil
+		if l.resource != nil {
+			l.resource.cleanupResource()
+			l.resource = nil
 		}
 
-		if testCleanup != nil {
-			testCleanup()
-			testCleanup = nil
+		if l.testCleanup != nil {
+			l.testCleanup()
+			l.testCleanup = nil
 		}
 	}
 
@@ -130,20 +134,20 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		skipPersistenceTest(driver)
 		init()
 		defer func() {
-			framework.VolumeTestCleanup(f, convertTestConfig(config))
+			framework.VolumeTestCleanup(f, convertTestConfig(l.config))
 			cleanup()
 		}()
 
 		tests := []framework.VolumeTest{
 			{
-				Volume: *resource.volSource,
+				Volume: *l.resource.volSource,
 				File:   "index.html",
 				// Must match content
 				ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
 					dInfo.Name, f.Namespace.Name),
 			},
 		}
 
-		config := convertTestConfig(config)
+		config := convertTestConfig(l.config)
 		framework.InjectHtml(f.ClientSet, config, tests[0].Volume, tests[0].ExpectedContent)
 
 		var fsGroup *int64
 		if dInfo.Capabilities[CapFsGroup] {
@@ -158,7 +162,7 @@ func (t *volumesTestSuite) defineTests(driver TestDriver, pattern testpatterns.T
 		init()
 		defer cleanup()
 
-		testScriptInPod(f, resource.volType, resource.volSource, config.ClientNodeSelector)
+		testScriptInPod(f, l.resource.volType, l.resource.volSource, l.config.ClientNodeSelector)
 	})
 }
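A small side benefit visible in volumes.go: the old `config := convertTestConfig(config)` shadowed the captured `config` variable inside the It block, which is easy to misread; with the state moved into `l`, `config := convertTestConfig(l.config)` introduces a genuinely new name and the shadowing disappears. A standalone illustration of the pitfall (toy types, not from the patch):

package main

import "fmt"

type testConfig struct{ name string }

func convert(c *testConfig) string { return "converted-" + c.name }

func main() {
	config := &testConfig{name: "outer"}

	func() {
		// Old style: this new string variable shadows the captured
		// *testConfig of the same name; later lines in the closure can
		// no longer reach the original pointer.
		config := convert(config)
		fmt.Println(config) // "converted-outer"
	}()

	// With per-test state in a struct, no shadowing is needed:
	l := struct{ config *testConfig }{config: config}
	converted := convert(l.config)
	fmt.Println(converted)
}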