diff --git a/hack/.golint_failures b/hack/.golint_failures
index c240ea2073f..7752da37da1 100644
--- a/hack/.golint_failures
+++ b/hack/.golint_failures
@@ -751,6 +751,8 @@ test/e2e/scalability
 test/e2e/scheduling
 test/e2e/servicecatalog
 test/e2e/storage
+test/e2e/storage/drivers
+test/e2e/storage/testsuites
 test/e2e/storage/utils
 test/e2e/storage/vsphere
 test/e2e/ui
diff --git a/test/e2e/framework/pv_util.go b/test/e2e/framework/pv_util.go
index 84f80770e78..8b14bedf3b6 100644
--- a/test/e2e/framework/pv_util.go
+++ b/test/e2e/framework/pv_util.go
@@ -507,7 +507,6 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
 // Deletes the passed-in pod and waits for the pod to be terminated. Resilient to the pod
 // not existing.
 func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
-	const maxWait = 5 * time.Minute
 	if pod == nil {
 		return nil
 	}
@@ -519,8 +518,8 @@ func DeletePodWithWait(f *Framework, c clientset.Interface, pod *v1.Pod) error {
 		}
 		return fmt.Errorf("pod Delete API error: %v", err)
 	}
-	Logf("Wait up to %v for pod %q to be fully deleted", maxWait, pod.Name)
-	err = f.WaitForPodNotFound(pod.Name, maxWait)
+	Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, pod.Name)
+	err = f.WaitForPodNotFound(pod.Name, PodDeleteTimeout)
 	if err != nil {
 		return fmt.Errorf("pod %q was not deleted: %v", pod.Name, err)
 	}
@@ -1006,11 +1005,19 @@ func CreateNginxPod(client clientset.Interface, namespace string, nodeSelector m
 // create security pod with given claims
 func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
+	return CreateSecPodWithNodeName(client, namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, "", timeout)
+}
+
+// create security pod with given claims, scheduled onto the given node name (an empty nodeName leaves scheduling to the scheduler)
+func CreateSecPodWithNodeName(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, nodeName string, timeout time.Duration) (*v1.Pod, error) {
 	pod := MakeSecPod(namespace, pvclaims, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
+	// Set nodeName before the pod is created; setting it afterwards has no effect on scheduling.
+	pod.Spec.NodeName = nodeName
+
 	pod, err := client.CoreV1().Pods(namespace).Create(pod)
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %v", err)
 	}
 	// Waiting for pod to be running
 	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
 	if err != nil {
diff --git a/test/e2e/framework/volume_util.go b/test/e2e/framework/volume_util.go
index 7bb0543e025..4836e832d49 100644
--- a/test/e2e/framework/volume_util.go
+++ b/test/e2e/framework/volume_util.go
@@ -68,7 +68,7 @@ const (
 	TiB int64 = 1024 * GiB
 
 	// Waiting period for volume server (Ceph, ...) to initialize itself.
-	VolumeServerPodStartupTimeout = 1 * time.Minute
+	VolumeServerPodStartupTimeout = 3 * time.Minute
 
 	// Waiting period for pod to be cleaned up and unmount its volumes so we
 	// don't tear down containers with NFS/Ceph/Gluster server too early.
@@ -356,16 +356,42 @@ func StartVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
 	return pod
 }
 
+// CleanUpVolumeServer cleans up a volume server pod created by a CreateStorageServer function that does not use a secret.
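// As a usage sketch (illustrative only, not prescribed by this patch): a driver
// whose CreateVolume called framework.NewNFSServer can tear the server down with
//
//	framework.CleanUpVolumeServer(f, serverPod)
//
// while a Ceph-style driver whose server setup also returned a secret uses
// CleanUpVolumeServerWithSecret(f, serverPod, secret) instead. Both patterns
// appear in the in-tree drivers added later in this patch.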
+func CleanUpVolumeServer(f *Framework, serverPod *v1.Pod) { + CleanUpVolumeServerWithSecret(f, serverPod, nil) +} + +// Wrapper of cleanup function for volume server with secret created by specific CreateStorageServer function. +func CleanUpVolumeServerWithSecret(f *Framework, serverPod *v1.Pod, secret *v1.Secret) { + cs := f.ClientSet + ns := f.Namespace + + if secret != nil { + Logf("Deleting server secret %q...", secret.Name) + err := cs.CoreV1().Secrets(ns.Name).Delete(secret.Name, &metav1.DeleteOptions{}) + if err != nil { + Logf("Delete secret failed: %v", err) + } + } + + Logf("Deleting server pod %q...", serverPod.Name) + err := DeletePodWithWait(f, cs, serverPod) + if err != nil { + Logf("Server pod delete failed: %v", err) + } +} + // Clean both server and client pods. func VolumeTestCleanup(f *Framework, config VolumeTestConfig) { By(fmt.Sprint("cleaning the environment after ", config.Prefix)) defer GinkgoRecover() - client := f.ClientSet - podClient := client.CoreV1().Pods(config.Namespace) + cs := f.ClientSet - err := podClient.Delete(config.Prefix+"-client", nil) + pod, err := cs.CoreV1().Pods(config.Namespace).Get(config.Prefix+"-client", metav1.GetOptions{}) + ExpectNoError(err, "Failed to get client pod: %v", err) + err = DeletePodWithWait(f, cs, pod) if err != nil { // Log the error before failing test: if the test has already failed, // framework.ExpectNoError() won't print anything to logs! @@ -374,15 +400,9 @@ func VolumeTestCleanup(f *Framework, config VolumeTestConfig) { } if config.ServerImage != "" { - if err := f.WaitForPodTerminated(config.Prefix+"-client", ""); !apierrs.IsNotFound(err) { - ExpectNoError(err, "Failed to wait client pod terminated: %v", err) - } - // See issue #24100. - // Prevent umount errors by making sure making sure the client pod exits cleanly *before* the volume server pod exits. 
- By("sleeping a bit so kubelet can unmount and detach the volume") - time.Sleep(PodCleanupTimeout) - - err = podClient.Delete(config.Prefix+"-server", nil) + pod, err := cs.CoreV1().Pods(config.Namespace).Get(config.Prefix+"-server", metav1.GetOptions{}) + ExpectNoError(err, "Failed to get server pod: %v", err) + err = DeletePodWithWait(f, cs, pod) if err != nil { glog.Warningf("Failed to delete server pod: %v", err) ExpectNoError(err, "Failed to delete server pod: %v", err) @@ -438,9 +458,7 @@ func TestVolumeClient(client clientset.Interface, config VolumeTestConfig, fsGro } podsNamespacer := client.CoreV1().Pods(config.Namespace) - if fsGroup != nil { - clientPod.Spec.SecurityContext.FSGroup = fsGroup - } + clientPod.Spec.SecurityContext.FSGroup = fsGroup for i, test := range tests { volumeName := fmt.Sprintf("%s-%s-%d", config.Prefix, "volume", i) @@ -520,6 +538,7 @@ func InjectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.V VolumeSource: volume, }, }, + NodeName: config.ClientNodeName, NodeSelector: config.NodeSelector, }, } diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD index b2940ed0c2e..29a0f599112 100644 --- a/test/e2e/storage/BUILD +++ b/test/e2e/storage/BUILD @@ -9,19 +9,18 @@ go_library( "ephemeral_volume.go", "flexvolume.go", "generic_persistent_volume-disruptive.go", + "in_tree_volumes.go", "mounted_volume_resize.go", "nfs_persistent_volume-disruptive.go", "pd.go", "persistent_volumes.go", "persistent_volumes-gce.go", "persistent_volumes-local.go", - "persistent_volumes-volumemode.go", "pv_protection.go", "pvc_protection.go", "regional_pd.go", "subpath.go", "volume_expand.go", - "volume_io.go", "volume_metrics.go", "volume_provisioning.go", "volumes.go", @@ -67,8 +66,9 @@ go_library( "//test/e2e/framework/metrics:go_default_library", "//test/e2e/generated:go_default_library", "//test/e2e/manifest:go_default_library", + "//test/e2e/storage/drivers:go_default_library", + "//test/e2e/storage/testsuites:go_default_library", "//test/e2e/storage/utils:go_default_library", - "//test/e2e/storage/vsphere:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws:go_default_library", "//vendor/github.com/aws/aws-sdk-go/aws/session:go_default_library", @@ -92,6 +92,9 @@ filegroup( name = "all-srcs", srcs = [ ":package-srcs", + "//test/e2e/storage/drivers:all-srcs", + "//test/e2e/storage/testpatterns:all-srcs", + "//test/e2e/storage/testsuites:all-srcs", "//test/e2e/storage/utils:all-srcs", "//test/e2e/storage/vsphere:all-srcs", ], diff --git a/test/e2e/storage/drivers/BUILD b/test/e2e/storage/drivers/BUILD new file mode 100644 index 00000000000..21e72bc097d --- /dev/null +++ b/test/e2e/storage/drivers/BUILD @@ -0,0 +1,42 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "base.go", + "in_tree.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/storage/drivers", + visibility = ["//visibility:public"], + deps = [ + "//pkg/kubelet/apis:go_default_library", + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library", + 
"//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//test/e2e/framework:go_default_library", + "//test/e2e/storage/testpatterns:go_default_library", + "//test/e2e/storage/vsphere:go_default_library", + "//test/utils/image:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/storage/drivers/base.go b/test/e2e/storage/drivers/base.go new file mode 100644 index 00000000000..f241ff750a0 --- /dev/null +++ b/test/e2e/storage/drivers/base.go @@ -0,0 +1,175 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package drivers + +import ( + "fmt" + + "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/testpatterns" +) + +// TestDriver represents an interface for a driver to be tested in TestSuite +type TestDriver interface { + // GetDriverInfo returns DriverInfo for the TestDriver + GetDriverInfo() *DriverInfo + // CreateDriver creates all driver resources that is required for TestDriver method + // except CreateVolume + CreateDriver() + // CreateDriver cleanup all the resources that is created in CreateDriver + CleanupDriver() + // SkipUnsupportedTest skips test in Testpattern is not suitable to test with the TestDriver + SkipUnsupportedTest(testpatterns.TestPattern) +} + +// PreprovisionedVolumeTestDriver represents an interface for a TestDriver that has pre-provisioned volume +type PreprovisionedVolumeTestDriver interface { + TestDriver + // CreateVolume creates a pre-provisioned volume. + CreateVolume(testpatterns.TestVolType) + // DeleteVolume deletes a volume that is created in CreateVolume + DeleteVolume(testpatterns.TestVolType) +} + +// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume +type InlineVolumeTestDriver interface { + PreprovisionedVolumeTestDriver + // GetVolumeSource returns a volumeSource for inline volume. + // It will set readOnly and fsType to the volumeSource, if TestDriver supports both of them. + // It will return nil, if the TestDriver doesn't support either of the parameters. + GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource +} + +// PreprovisionedPVTestDriver represents an interface for a TestDriver that supports PreprovisionedPV +type PreprovisionedPVTestDriver interface { + PreprovisionedVolumeTestDriver + // GetPersistentVolumeSource returns a PersistentVolumeSource for pre-provisioned Persistent Volume. + // It will set readOnly and fsType to the PersistentVolumeSource, if TestDriver supports both of them. 
+ // It will return nil, if the TestDriver doesn't support either of the parameters. + GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource +} + +// DynamicPVTestDriver represents an interface for a TestDriver that supports DynamicPV +type DynamicPVTestDriver interface { + TestDriver + // GetDynamicProvisionStorageClass returns a StorageClass dynamic provision Persistent Volume. + // It will set fsType to the StorageClass, if TestDriver supports it. + // It will return nil, if the TestDriver doesn't support it. + GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass +} + +// DriverInfo represents a combination of parameters to be used in implementation of TestDriver +type DriverInfo struct { + Name string // Name of the driver + FeatureTag string // FeatureTag for the driver + + MaxFileSize int64 // Max file size to be tested for this driver + SupportedFsType sets.String // Map of string for supported fs type + IsPersistent bool // Flag to represent whether it provides persistency + IsFsGroupSupported bool // Flag to represent whether it supports fsGroup + IsBlockSupported bool // Flag to represent whether it supports Block Volume + + // Parameters below will be set inside test loop by using SetCommonDriverParameters. + // Drivers that implement TestDriver is required to set all the above parameters + // and return DriverInfo on GetDriverInfo() call. + Framework *framework.Framework // Framework for the test + Config framework.VolumeTestConfig // VolumeTestConfig for thet test +} + +// GetDriverNameWithFeatureTags returns driver name with feature tags +// For example) +// - [Driver: nfs] +// - [Driver: rbd][Feature:Volumes] +func GetDriverNameWithFeatureTags(driver TestDriver) string { + dInfo := driver.GetDriverInfo() + + return fmt.Sprintf("[Driver: %s]%s", dInfo.Name, dInfo.FeatureTag) +} + +func CreateVolume(driver TestDriver, volType testpatterns.TestVolType) { + // Create Volume for test unless dynamicPV test + switch volType { + case testpatterns.InlineVolume: + fallthrough + case testpatterns.PreprovisionedPV: + if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok { + pDriver.CreateVolume(volType) + } + case testpatterns.DynamicPV: + // No need to create volume + default: + framework.Failf("Invalid volType specified: %v", volType) + } +} + +func DeleteVolume(driver TestDriver, volType testpatterns.TestVolType) { + // Delete Volume for test unless dynamicPV test + switch volType { + case testpatterns.InlineVolume: + fallthrough + case testpatterns.PreprovisionedPV: + if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok { + pDriver.DeleteVolume(volType) + } + case testpatterns.DynamicPV: + // No need to delete volume + default: + framework.Failf("Invalid volType specified: %v", volType) + } +} + +// SetCommonDriverParameters sets a common driver parameters to TestDriver +// This function is intended to be called in BeforeEach() inside test loop. 
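// A typical call site (sketch based on the in_tree_volumes.go test loop added
// later in this patch) looks like:
//
//	BeforeEach(func() {
//		drivers.SetCommonDriverParameters(driver, f, config)
//		driver.CreateDriver()
//	})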
+func SetCommonDriverParameters( + driver TestDriver, + f *framework.Framework, + config framework.VolumeTestConfig, +) { + dInfo := driver.GetDriverInfo() + + dInfo.Framework = f + dInfo.Config = config +} + +func getStorageClass( + provisioner string, + parameters map[string]string, + bindingMode *storagev1.VolumeBindingMode, + ns string, + suffix string, +) *storagev1.StorageClass { + if bindingMode == nil { + defaultBindingMode := storagev1.VolumeBindingImmediate + bindingMode = &defaultBindingMode + } + return &storagev1.StorageClass{ + TypeMeta: metav1.TypeMeta{ + Kind: "StorageClass", + }, + ObjectMeta: metav1.ObjectMeta{ + // Name must be unique, so let's base it on namespace name + Name: ns + "-" + suffix, + }, + Provisioner: provisioner, + Parameters: parameters, + VolumeBindingMode: bindingMode, + } +} diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go new file mode 100644 index 00000000000..f87a564ea56 --- /dev/null +++ b/test/e2e/storage/drivers/in_tree.go @@ -0,0 +1,1455 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + * This file defines various in-tree volume test drivers for TestSuites. + * + * There are two ways, how to prepare test drivers: + * 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...) + * It creates a server pod which defines one volume for the tests. + * These tests work only when privileged containers are allowed, exporting + * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or + * other privileged magic in the server pod. + * + * Note that the server containers are for testing purposes only and should not + * be used in production. + * + * 2) With server or cloud provider outside of Kubernetes (Cinder, GCE, AWS, Azure, ...) + * Appropriate server or cloud provider must exist somewhere outside + * the tested Kubernetes cluster. CreateVolume will create a new volume to be + * used in the TestSuites for inlineVolume or DynamicPV tests. + */ + +package drivers + +import ( + "fmt" + "math/rand" + "os/exec" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + "k8s.io/api/core/v1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apiserver/pkg/authentication/serviceaccount" + clientset "k8s.io/client-go/kubernetes" + kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/testpatterns" + vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" + imageutils "k8s.io/kubernetes/test/utils/image" +) + +// NFS +type nfsDriver struct { + serverIP string + serverPod *v1.Pod + externalProvisionerPod *v1.Pod + externalPluginName string + + driverInfo DriverInfo +} + +var _ TestDriver = &nfsDriver{} +var _ PreprovisionedVolumeTestDriver = &nfsDriver{} +var _ InlineVolumeTestDriver = &nfsDriver{} +var _ PreprovisionedPVTestDriver = &nfsDriver{} +var _ DynamicPVTestDriver = &nfsDriver{} + +// InitNFSDriver returns nfsDriver that implements TestDriver interface +func InitNFSDriver() TestDriver { + return &nfsDriver{ + driverInfo: DriverInfo{ + Name: "nfs", + MaxFileSize: testpatterns.FileSizeLarge, + SupportedFsType: sets.NewString( + "", // Default fsType + ), + IsPersistent: true, + IsFsGroupSupported: false, + IsBlockSupported: false, + }, + } +} + +func (n *nfsDriver) GetDriverInfo() *DriverInfo { + return &n.driverInfo +} + +func (n *nfsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (n *nfsDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + return &v1.VolumeSource{ + NFS: &v1.NFSVolumeSource{ + Server: n.serverIP, + Path: "/", + ReadOnly: readOnly, + }, + } +} + +func (n *nfsDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + return &v1.PersistentVolumeSource{ + NFS: &v1.NFSVolumeSource{ + Server: n.serverIP, + Path: "/", + ReadOnly: readOnly, + }, + } +} + +func (n *nfsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { + provisioner := n.externalPluginName + parameters := map[string]string{"mountOptions": "vers=4.1"} + ns := n.driverInfo.Framework.Namespace.Name + suffix := fmt.Sprintf("%s-sc", n.driverInfo.Name) + + return getStorageClass(provisioner, parameters, nil, ns, suffix) +} + +func (n *nfsDriver) CreateDriver() { + f := n.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name) + + // TODO(mkimuram): cluster-admin gives too much right but system:persistent-volume-provisioner + // is not enough. We should create new clusterrole for testing. 
+ framework.BindClusterRole(cs.RbacV1beta1(), "cluster-admin", ns.Name, + rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns.Name, Name: "default"}) + + err := framework.WaitForAuthorizationUpdate(cs.AuthorizationV1beta1(), + serviceaccount.MakeUsername(ns.Name, "default"), + "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true) + framework.ExpectNoError(err, "Failed to update authorization: %v", err) + + By("creating an external dynamic provisioner pod") + n.externalProvisionerPod = startExternalProvisioner(cs, ns.Name, n.externalPluginName) +} + +func startExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod { + podClient := c.CoreV1().Pods(ns) + + provisionerPod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "external-provisioner-", + }, + + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "nfs-provisioner", + Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9", + SecurityContext: &v1.SecurityContext{ + Capabilities: &v1.Capabilities{ + Add: []v1.Capability{"DAC_READ_SEARCH"}, + }, + }, + Args: []string{ + "-provisioner=" + externalPluginName, + "-grace-period=0", + }, + Ports: []v1.ContainerPort{ + {Name: "nfs", ContainerPort: 2049}, + {Name: "mountd", ContainerPort: 20048}, + {Name: "rpcbind", ContainerPort: 111}, + {Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP}, + }, + Env: []v1.EnvVar{ + { + Name: "POD_IP", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + }, + ImagePullPolicy: v1.PullIfNotPresent, + VolumeMounts: []v1.VolumeMount{ + { + Name: "export-volume", + MountPath: "/export", + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: "export-volume", + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + }, + }, + } + provisionerPod, err := podClient.Create(provisionerPod) + framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err) + + framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod)) + + By("locating the provisioner pod") + pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{}) + framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err) + + return pod +} + +func (n *nfsDriver) CleanupDriver() { + f := n.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + + framework.ExpectNoError(framework.DeletePodWithWait(f, cs, n.externalProvisionerPod)) + clusterRoleBindingName := ns.Name + "--" + "cluster-admin" + cs.RbacV1beta1().ClusterRoleBindings().Delete(clusterRoleBindingName, metav1.NewDeleteOptions(0)) +} + +func (n *nfsDriver) CreateVolume(volType testpatterns.TestVolType) { + f := n.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + + // NewNFSServer creates a pod for InlineVolume and PreprovisionedPV, + // and startExternalProvisioner creates a pods for DynamicPV. + // Therefore, we need a different CreateDriver logic for volType. 
+ switch volType { + case testpatterns.InlineVolume: + fallthrough + case testpatterns.PreprovisionedPV: + n.driverInfo.Config, n.serverPod, n.serverIP = framework.NewNFSServer(cs, ns.Name, []string{}) + case testpatterns.DynamicPV: + // Do nothing + default: + framework.Failf("Unsupported volType:%v is specified", volType) + } +} + +func (n *nfsDriver) DeleteVolume(volType testpatterns.TestVolType) { + f := n.driverInfo.Framework + + switch volType { + case testpatterns.InlineVolume: + fallthrough + case testpatterns.PreprovisionedPV: + framework.CleanUpVolumeServer(f, n.serverPod) + case testpatterns.DynamicPV: + // Do nothing + default: + framework.Failf("Unsupported volType:%v is specified", volType) + } +} + +// Gluster +type glusterFSDriver struct { + serverIP string + serverPod *v1.Pod + + driverInfo DriverInfo +} + +var _ TestDriver = &glusterFSDriver{} +var _ PreprovisionedVolumeTestDriver = &glusterFSDriver{} +var _ InlineVolumeTestDriver = &glusterFSDriver{} +var _ PreprovisionedPVTestDriver = &glusterFSDriver{} + +// InitGlusterFSDriver returns glusterFSDriver that implements TestDriver interface +func InitGlusterFSDriver() TestDriver { + return &glusterFSDriver{ + driverInfo: DriverInfo{ + Name: "gluster", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + ), + IsPersistent: true, + IsFsGroupSupported: false, + IsBlockSupported: false, + }, + } +} + +func (g *glusterFSDriver) GetDriverInfo() *DriverInfo { + return &g.driverInfo +} + +func (g *glusterFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { + framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") + if pattern.FsType == "xfs" { + framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom") + } +} + +func (g *glusterFSDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + name := g.driverInfo.Config.Prefix + "-server" + return &v1.VolumeSource{ + Glusterfs: &v1.GlusterfsVolumeSource{ + EndpointsName: name, + // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh + Path: "test_vol", + ReadOnly: readOnly, + }, + } +} + +func (g *glusterFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + name := g.driverInfo.Config.Prefix + "-server" + return &v1.PersistentVolumeSource{ + Glusterfs: &v1.GlusterfsVolumeSource{ + EndpointsName: name, + // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh + Path: "test_vol", + ReadOnly: readOnly, + }, + } +} + +func (g *glusterFSDriver) CreateDriver() { +} + +func (g *glusterFSDriver) CleanupDriver() { +} + +func (g *glusterFSDriver) CreateVolume(volType testpatterns.TestVolType) { + f := g.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + + g.driverInfo.Config, g.serverPod, g.serverIP = framework.NewGlusterfsServer(cs, ns.Name) +} + +func (g *glusterFSDriver) DeleteVolume(volType testpatterns.TestVolType) { + f := g.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + + name := g.driverInfo.Config.Prefix + "-server" + + framework.Logf("Deleting Gluster endpoints %q...", name) + epErr := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) + framework.Logf("Deleting Gluster server pod %q...", g.serverPod.Name) + err := framework.DeletePodWithWait(f, cs, g.serverPod) + if epErr != nil || err != nil { + if epErr != nil { + framework.Logf("Gluster delete endpoints failed: %v", err) + } + if err != nil { + framework.Logf("Gluster server pod delete failed: %v", err) + } + framework.Failf("Cleanup failed") 
+ } +} + +// iSCSI +// The iscsiadm utility and iscsi target kernel modules must be installed on all nodes. +type iSCSIDriver struct { + serverIP string + serverPod *v1.Pod + + driverInfo DriverInfo +} + +var _ TestDriver = &iSCSIDriver{} +var _ PreprovisionedVolumeTestDriver = &iSCSIDriver{} +var _ InlineVolumeTestDriver = &iSCSIDriver{} +var _ PreprovisionedPVTestDriver = &iSCSIDriver{} + +// InitISCSIDriver returns iSCSIDriver that implements TestDriver interface +func InitISCSIDriver() TestDriver { + return &iSCSIDriver{ + driverInfo: DriverInfo{ + Name: "iscsi", + FeatureTag: "[Feature:Volumes]", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + "ext2", + // TODO: fix iSCSI driver can work with ext3 + //"ext3", + "ext4", + ), + IsPersistent: true, + IsFsGroupSupported: true, + IsBlockSupported: true, + }, + } +} + +func (i *iSCSIDriver) GetDriverInfo() *DriverInfo { + return &i.driverInfo +} + +func (i *iSCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (i *iSCSIDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + volSource := v1.VolumeSource{ + ISCSI: &v1.ISCSIVolumeSource{ + TargetPortal: i.serverIP + ":3260", + // from test/images/volume/iscsi/initiatorname.iscsi + IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", + Lun: 0, + ReadOnly: readOnly, + }, + } + if fsType != "" { + volSource.ISCSI.FSType = fsType + } + return &volSource +} + +func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + pvSource := v1.PersistentVolumeSource{ + ISCSI: &v1.ISCSIPersistentVolumeSource{ + TargetPortal: i.serverIP + ":3260", + IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", + Lun: 0, + ReadOnly: readOnly, + }, + } + if fsType != "" { + pvSource.ISCSI.FSType = fsType + } + return &pvSource +} + +func (i *iSCSIDriver) CreateDriver() { +} + +func (i *iSCSIDriver) CleanupDriver() { +} + +func (i *iSCSIDriver) CreateVolume(volType testpatterns.TestVolType) { + f := i.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + + i.driverInfo.Config, i.serverPod, i.serverIP = framework.NewISCSIServer(cs, ns.Name) +} + +func (i *iSCSIDriver) DeleteVolume(volType testpatterns.TestVolType) { + f := i.driverInfo.Framework + + framework.CleanUpVolumeServer(f, i.serverPod) +} + +// Ceph RBD +type rbdDriver struct { + serverIP string + serverPod *v1.Pod + secret *v1.Secret + + driverInfo DriverInfo +} + +var _ TestDriver = &rbdDriver{} +var _ PreprovisionedVolumeTestDriver = &rbdDriver{} +var _ InlineVolumeTestDriver = &rbdDriver{} +var _ PreprovisionedPVTestDriver = &rbdDriver{} + +// InitRbdDriver returns rbdDriver that implements TestDriver interface +func InitRbdDriver() TestDriver { + return &rbdDriver{ + driverInfo: DriverInfo{ + Name: "rbd", + FeatureTag: "[Feature:Volumes]", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + "ext2", + // TODO: fix rbd driver can work with ext3 + //"ext3", + "ext4", + ), + IsPersistent: true, + IsFsGroupSupported: true, + IsBlockSupported: true}, + } +} + +func (r *rbdDriver) GetDriverInfo() *DriverInfo { + return &r.driverInfo +} + +func (r *rbdDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (r *rbdDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + volSource := v1.VolumeSource{ + RBD: &v1.RBDVolumeSource{ + CephMonitors: []string{r.serverIP}, + RBDPool: "rbd", + RBDImage: "foo", + 
RadosUser: "admin", + SecretRef: &v1.LocalObjectReference{ + Name: r.secret.Name, + }, + ReadOnly: readOnly, + }, + } + if fsType != "" { + volSource.RBD.FSType = fsType + } + return &volSource +} + +func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + f := r.driverInfo.Framework + ns := f.Namespace + pvSource := v1.PersistentVolumeSource{ + RBD: &v1.RBDPersistentVolumeSource{ + CephMonitors: []string{r.serverIP}, + RBDPool: "rbd", + RBDImage: "foo", + RadosUser: "admin", + SecretRef: &v1.SecretReference{ + Name: r.secret.Name, + Namespace: ns.Name, + }, + ReadOnly: readOnly, + }, + } + if fsType != "" { + pvSource.RBD.FSType = fsType + } + return &pvSource +} + +func (r *rbdDriver) CreateDriver() { +} + +func (r *rbdDriver) CleanupDriver() { +} + +func (r *rbdDriver) CreateVolume(volType testpatterns.TestVolType) { + f := r.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + + r.driverInfo.Config, r.serverPod, r.secret, r.serverIP = framework.NewRBDServer(cs, ns.Name) +} + +func (r *rbdDriver) DeleteVolume(volType testpatterns.TestVolType) { + f := r.driverInfo.Framework + + framework.CleanUpVolumeServerWithSecret(f, r.serverPod, r.secret) +} + +// Ceph +type cephFSDriver struct { + serverIP string + serverPod *v1.Pod + secret *v1.Secret + + driverInfo DriverInfo +} + +var _ TestDriver = &cephFSDriver{} +var _ PreprovisionedVolumeTestDriver = &cephFSDriver{} +var _ InlineVolumeTestDriver = &cephFSDriver{} +var _ PreprovisionedPVTestDriver = &cephFSDriver{} + +// InitCephFSDriver returns cephFSDriver that implements TestDriver interface +func InitCephFSDriver() TestDriver { + return &cephFSDriver{ + driverInfo: DriverInfo{ + Name: "ceph", + FeatureTag: "[Feature:Volumes]", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + ), + IsPersistent: true, + IsFsGroupSupported: false, + IsBlockSupported: false, + }, + } +} + +func (c *cephFSDriver) GetDriverInfo() *DriverInfo { + return &c.driverInfo +} + +func (c *cephFSDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (c *cephFSDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + return &v1.VolumeSource{ + CephFS: &v1.CephFSVolumeSource{ + Monitors: []string{c.serverIP + ":6789"}, + User: "kube", + SecretRef: &v1.LocalObjectReference{ + Name: c.secret.Name, + }, + ReadOnly: readOnly, + }, + } +} + +func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + f := c.driverInfo.Framework + ns := f.Namespace + + return &v1.PersistentVolumeSource{ + CephFS: &v1.CephFSPersistentVolumeSource{ + Monitors: []string{c.serverIP + ":6789"}, + User: "kube", + SecretRef: &v1.SecretReference{ + Name: c.secret.Name, + Namespace: ns.Name, + }, + ReadOnly: readOnly, + }, + } +} + +func (c *cephFSDriver) CreateDriver() { +} + +func (c *cephFSDriver) CleanupDriver() { +} + +func (c *cephFSDriver) CreateVolume(volType testpatterns.TestVolType) { + f := c.driverInfo.Framework + cs := f.ClientSet + ns := f.Namespace + + c.driverInfo.Config, c.serverPod, c.secret, c.serverIP = framework.NewRBDServer(cs, ns.Name) +} + +func (c *cephFSDriver) DeleteVolume(volType testpatterns.TestVolType) { + f := c.driverInfo.Framework + + framework.CleanUpVolumeServerWithSecret(f, c.serverPod, c.secret) +} + +// Hostpath +type hostpathDriver struct { + node v1.Node + + driverInfo DriverInfo +} + +var _ TestDriver = &hostpathDriver{} +var _ PreprovisionedVolumeTestDriver = 
&hostpathDriver{} +var _ InlineVolumeTestDriver = &hostpathDriver{} + +// InitHostpathDriver returns hostpathDriver that implements TestDriver interface +func InitHostpathDriver() TestDriver { + return &hostpathDriver{ + driverInfo: DriverInfo{ + Name: "hostpath", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + ), + IsPersistent: true, + IsFsGroupSupported: false, + IsBlockSupported: false, + }, + } +} + +func (h *hostpathDriver) GetDriverInfo() *DriverInfo { + return &h.driverInfo +} + +func (h *hostpathDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (h *hostpathDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + // hostpath doesn't support readOnly volume + if readOnly { + return nil + } + return &v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/tmp", + }, + } +} + +func (h *hostpathDriver) CreateDriver() { +} + +func (h *hostpathDriver) CleanupDriver() { +} + +func (h *hostpathDriver) CreateVolume(volType testpatterns.TestVolType) { + f := h.driverInfo.Framework + cs := f.ClientSet + + // pods should be scheduled on the node + nodes := framework.GetReadySchedulableNodesOrDie(cs) + node := nodes.Items[rand.Intn(len(nodes.Items))] + h.driverInfo.Config.ClientNodeName = node.Name +} + +func (h *hostpathDriver) DeleteVolume(volType testpatterns.TestVolType) { +} + +// HostpathSymlink +type hostpathSymlinkDriver struct { + node v1.Node + sourcePath string + targetPath string + prepPod *v1.Pod + + driverInfo DriverInfo +} + +var _ TestDriver = &hostpathSymlinkDriver{} +var _ PreprovisionedVolumeTestDriver = &hostpathSymlinkDriver{} +var _ InlineVolumeTestDriver = &hostpathSymlinkDriver{} + +// InitHostpathSymlinkDriver returns hostpathSymlinkDriver that implements TestDriver interface +func InitHostpathSymlinkDriver() TestDriver { + return &hostpathSymlinkDriver{ + driverInfo: DriverInfo{ + Name: "hostpathSymlink", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + ), + IsPersistent: true, + IsFsGroupSupported: false, + IsBlockSupported: false, + }, + } +} + +func (h *hostpathSymlinkDriver) GetDriverInfo() *DriverInfo { + return &h.driverInfo +} + +func (h *hostpathSymlinkDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (h *hostpathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + // hostpath doesn't support readOnly volume + if readOnly { + return nil + } + return &v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: h.targetPath, + }, + } +} + +func (h *hostpathSymlinkDriver) CreateDriver() { +} + +func (h *hostpathSymlinkDriver) CleanupDriver() { +} + +func (h *hostpathSymlinkDriver) CreateVolume(volType testpatterns.TestVolType) { + f := h.driverInfo.Framework + cs := f.ClientSet + + h.sourcePath = fmt.Sprintf("/tmp/%v", f.Namespace.Name) + h.targetPath = fmt.Sprintf("/tmp/%v-link", f.Namespace.Name) + volumeName := "test-volume" + + // pods should be scheduled on the node + nodes := framework.GetReadySchedulableNodesOrDie(cs) + node := nodes.Items[rand.Intn(len(nodes.Items))] + h.driverInfo.Config.ClientNodeName = node.Name + + cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", h.sourcePath, h.sourcePath, h.targetPath) + privileged := true + + // Launch pod to initialize hostpath directory and symlink + h.prepPod = &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("hostpath-symlink-prep-%s", f.Namespace.Name), + }, + Spec: 
v1.PodSpec{ + Containers: []v1.Container{ + { + Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name), + Image: imageutils.GetE2EImage(imageutils.BusyBox), + Command: []string{"/bin/sh", "-ec", cmd}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: "/tmp", + }, + }, + SecurityContext: &v1.SecurityContext{ + Privileged: &privileged, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{ + Path: "/tmp", + }, + }, + }, + }, + NodeName: node.Name, + }, + } + // h.prepPod will be reused in cleanupDriver. + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(h.prepPod) + Expect(err).ToNot(HaveOccurred(), "while creating hostpath init pod") + + err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) + Expect(err).ToNot(HaveOccurred(), "while waiting for hostpath init pod to succeed") + + err = framework.DeletePodWithWait(f, f.ClientSet, pod) + Expect(err).ToNot(HaveOccurred(), "while deleting hostpath init pod") +} + +func (h *hostpathSymlinkDriver) DeleteVolume(volType testpatterns.TestVolType) { + f := h.driverInfo.Framework + + cmd := fmt.Sprintf("rm -rf %v&& rm -rf %v", h.targetPath, h.sourcePath) + h.prepPod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", cmd} + + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(h.prepPod) + Expect(err).ToNot(HaveOccurred(), "while creating hostpath teardown pod") + + err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) + Expect(err).ToNot(HaveOccurred(), "while waiting for hostpath teardown pod to succeed") + + err = framework.DeletePodWithWait(f, f.ClientSet, pod) + Expect(err).ToNot(HaveOccurred(), "while deleting hostpath teardown pod") +} + +// emptydir +type emptydirDriver struct { + driverInfo DriverInfo +} + +var _ TestDriver = &emptydirDriver{} +var _ PreprovisionedVolumeTestDriver = &emptydirDriver{} +var _ InlineVolumeTestDriver = &emptydirDriver{} + +// InitEmptydirDriver returns emptydirDriver that implements TestDriver interface +func InitEmptydirDriver() TestDriver { + return &emptydirDriver{ + driverInfo: DriverInfo{ + Name: "emptydir", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + ), + IsPersistent: false, + IsFsGroupSupported: false, + IsBlockSupported: false, + }, + } +} + +func (e *emptydirDriver) GetDriverInfo() *DriverInfo { + return &e.driverInfo +} + +func (e *emptydirDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { +} + +func (e *emptydirDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + // emptydir doesn't support readOnly volume + if readOnly { + return nil + } + return &v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + } +} + +func (e *emptydirDriver) CreateVolume(volType testpatterns.TestVolType) { +} + +func (e *emptydirDriver) DeleteVolume(volType testpatterns.TestVolType) { +} + +func (e *emptydirDriver) CreateDriver() { +} + +func (e *emptydirDriver) CleanupDriver() { +} + +// Cinder +// This driver assumes that OpenStack client tools are installed +// (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone) +// and that the usual OpenStack authentication env. variables are set +// (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least). 
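// For example, the environment could be prepared roughly as follows before the
// test run (OS_AUTH_URL and the sample values are assumptions for illustration;
// only the variables named above are required by this comment):
//
//	export OS_AUTH_URL=https://keystone.example.com:5000/v2.0
//	export OS_USERNAME=demo
//	export OS_PASSWORD=secret
//	export OS_TENANT_NAME=demo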
+type cinderDriver struct { + volumeName string + volumeID string + + driverInfo DriverInfo +} + +var _ TestDriver = &cinderDriver{} +var _ PreprovisionedVolumeTestDriver = &cinderDriver{} +var _ InlineVolumeTestDriver = &cinderDriver{} +var _ PreprovisionedPVTestDriver = &cinderDriver{} +var _ DynamicPVTestDriver = &cinderDriver{} + +// InitCinderDriver returns cinderDriver that implements TestDriver interface +func InitCinderDriver() TestDriver { + return &cinderDriver{ + driverInfo: DriverInfo{ + Name: "cinder", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + "ext3", + ), + IsPersistent: true, + IsFsGroupSupported: true, + IsBlockSupported: false, + }, + } +} + +func (c *cinderDriver) GetDriverInfo() *DriverInfo { + return &c.driverInfo +} + +func (c *cinderDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { + framework.SkipUnlessProviderIs("openstack") +} + +func (c *cinderDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + volSource := v1.VolumeSource{ + Cinder: &v1.CinderVolumeSource{ + VolumeID: c.volumeID, + ReadOnly: readOnly, + }, + } + if fsType != "" { + volSource.Cinder.FSType = fsType + } + return &volSource +} + +func (c *cinderDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + pvSource := v1.PersistentVolumeSource{ + Cinder: &v1.CinderPersistentVolumeSource{ + VolumeID: c.volumeID, + ReadOnly: readOnly, + }, + } + if fsType != "" { + pvSource.Cinder.FSType = fsType + } + return &pvSource +} + +func (c *cinderDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { + provisioner := "kubernetes.io/cinder" + parameters := map[string]string{} + if fsType != "" { + parameters["fsType"] = fsType + } + ns := c.driverInfo.Framework.Namespace.Name + suffix := fmt.Sprintf("%s-sc", c.driverInfo.Name) + + return getStorageClass(provisioner, parameters, nil, ns, suffix) +} + +func (c *cinderDriver) CreateDriver() { +} + +func (c *cinderDriver) CleanupDriver() { +} + +func (c *cinderDriver) CreateVolume(volType testpatterns.TestVolType) { + f := c.driverInfo.Framework + ns := f.Namespace + + // We assume that namespace.Name is a random string + c.volumeName = ns.Name + By("creating a test Cinder volume") + output, err := exec.Command("cinder", "create", "--display-name="+c.volumeName, "1").CombinedOutput() + outputString := string(output[:]) + framework.Logf("cinder output:\n%s", outputString) + Expect(err).NotTo(HaveOccurred()) + + // Parse 'id'' from stdout. Expected format: + // | attachments | [] | + // | availability_zone | nova | + // ... + // | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 | + c.volumeID = "" + for _, line := range strings.Split(outputString, "\n") { + fields := strings.Fields(line) + if len(fields) != 5 { + continue + } + if fields[1] != "id" { + continue + } + c.volumeID = fields[3] + break + } + framework.Logf("Volume ID: %s", c.volumeID) + Expect(c.volumeID).NotTo(Equal("")) +} + +func (c *cinderDriver) DeleteVolume(volType testpatterns.TestVolType) { + deleteCinderVolume(c.volumeName) +} + +func deleteCinderVolume(name string) error { + // Try to delete the volume for several seconds - it takes + // a while for the plugin to detach it. 
+ var output []byte + var err error + timeout := time.Second * 120 + + framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name) + for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { + output, err = exec.Command("cinder", "delete", name).CombinedOutput() + if err == nil { + framework.Logf("Cinder volume %s deleted", name) + return nil + } + framework.Logf("Failed to delete volume %s: %v", name, err) + } + framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) + return err +} + +// GCE +type gceDriver struct { + volumeName string + + driverInfo DriverInfo +} + +var _ TestDriver = &gceDriver{} +var _ PreprovisionedVolumeTestDriver = &gceDriver{} +var _ InlineVolumeTestDriver = &gceDriver{} +var _ PreprovisionedPVTestDriver = &gceDriver{} +var _ DynamicPVTestDriver = &gceDriver{} + +// InitGceDriver returns gceDriver that implements TestDriver interface +func InitGceDriver() TestDriver { + return &gceDriver{ + driverInfo: DriverInfo{ + Name: "gce", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + "ext2", + "ext3", + "ext4", + "xfs", + ), + IsPersistent: true, + IsFsGroupSupported: true, + IsBlockSupported: true, + }, + } +} + +func (g *gceDriver) GetDriverInfo() *DriverInfo { + return &g.driverInfo +} + +func (g *gceDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { + framework.SkipUnlessProviderIs("gce", "gke") + if pattern.FsType == "xfs" { + framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom") + } +} + +func (g *gceDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + volSource := v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: g.volumeName, + ReadOnly: readOnly, + }, + } + if fsType != "" { + volSource.GCEPersistentDisk.FSType = fsType + } + return &volSource +} + +func (g *gceDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + pvSource := v1.PersistentVolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ + PDName: g.volumeName, + ReadOnly: readOnly, + }, + } + if fsType != "" { + pvSource.GCEPersistentDisk.FSType = fsType + } + return &pvSource +} + +func (g *gceDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { + provisioner := "kubernetes.io/gce-pd" + parameters := map[string]string{} + if fsType != "" { + parameters["fsType"] = fsType + } + ns := g.driverInfo.Framework.Namespace.Name + suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name) + + return getStorageClass(provisioner, parameters, nil, ns, suffix) +} + +func (g *gceDriver) CreateDriver() { +} + +func (g *gceDriver) CleanupDriver() { +} + +func (g *gceDriver) CreateVolume(volType testpatterns.TestVolType) { + if volType == testpatterns.InlineVolume { + // PD will be created in framework.TestContext.CloudConfig.Zone zone, + // so pods should be also scheduled there. 
+ g.driverInfo.Config.NodeSelector = map[string]string{ + kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone, + } + } + By("creating a test gce pd volume") + var err error + g.volumeName, err = framework.CreatePDWithRetry() + Expect(err).NotTo(HaveOccurred()) +} + +func (g *gceDriver) DeleteVolume(volType testpatterns.TestVolType) { + framework.DeletePDWithRetry(g.volumeName) +} + +// vSphere +type vSphereDriver struct { + volumePath string + nodeInfo *vspheretest.NodeInfo + + driverInfo DriverInfo +} + +var _ TestDriver = &vSphereDriver{} +var _ PreprovisionedVolumeTestDriver = &vSphereDriver{} +var _ InlineVolumeTestDriver = &vSphereDriver{} +var _ PreprovisionedPVTestDriver = &vSphereDriver{} +var _ DynamicPVTestDriver = &vSphereDriver{} + +// InitVSphereDriver returns vSphereDriver that implements TestDriver interface +func InitVSphereDriver() TestDriver { + return &vSphereDriver{ + driverInfo: DriverInfo{ + Name: "vSphere", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + "ext4", + ), + IsPersistent: true, + IsFsGroupSupported: true, + IsBlockSupported: false, + }, + } +} +func (v *vSphereDriver) GetDriverInfo() *DriverInfo { + return &v.driverInfo +} + +func (v *vSphereDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { + framework.SkipUnlessProviderIs("vsphere") +} + +func (v *vSphereDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + // vSphere driver doesn't seem to support readOnly volume + // TODO: check if it is correct + if readOnly { + return nil + } + volSource := v1.VolumeSource{ + VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ + VolumePath: v.volumePath, + }, + } + if fsType != "" { + volSource.VsphereVolume.FSType = fsType + } + return &volSource +} + +func (v *vSphereDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + // vSphere driver doesn't seem to support readOnly volume + // TODO: check if it is correct + if readOnly { + return nil + } + pvSource := v1.PersistentVolumeSource{ + VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ + VolumePath: v.volumePath, + }, + } + if fsType != "" { + pvSource.VsphereVolume.FSType = fsType + } + return &pvSource +} + +func (v *vSphereDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { + provisioner := "kubernetes.io/vsphere-volume" + parameters := map[string]string{} + if fsType != "" { + parameters["fsType"] = fsType + } + ns := v.driverInfo.Framework.Namespace.Name + suffix := fmt.Sprintf("%s-sc", v.driverInfo.Name) + + return getStorageClass(provisioner, parameters, nil, ns, suffix) +} + +func (v *vSphereDriver) CreateDriver() { +} + +func (v *vSphereDriver) CleanupDriver() { +} + +func (v *vSphereDriver) CreateVolume(volType testpatterns.TestVolType) { + f := v.driverInfo.Framework + vspheretest.Bootstrap(f) + v.nodeInfo = vspheretest.GetReadySchedulableRandomNodeInfo() + var err error + v.volumePath, err = v.nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, v.nodeInfo.DataCenterRef) + Expect(err).NotTo(HaveOccurred()) +} + +func (v *vSphereDriver) DeleteVolume(volType testpatterns.TestVolType) { + v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef) +} + +// Azure +type azureDriver struct { + volumeName string + + driverInfo DriverInfo +} + +var _ TestDriver = &azureDriver{} +var _ PreprovisionedVolumeTestDriver = &azureDriver{} +var _ InlineVolumeTestDriver = &azureDriver{} +var _ PreprovisionedPVTestDriver = 
&azureDriver{} +var _ DynamicPVTestDriver = &azureDriver{} + +// InitAzureDriver returns azureDriver that implements TestDriver interface +func InitAzureDriver() TestDriver { + return &azureDriver{ + driverInfo: DriverInfo{ + Name: "azure", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + "ext4", + ), + IsPersistent: true, + IsFsGroupSupported: true, + IsBlockSupported: true, + }, + } +} + +func (a *azureDriver) GetDriverInfo() *DriverInfo { + return &a.driverInfo +} + +func (a *azureDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { + framework.SkipUnlessProviderIs("azure") +} + +func (a *azureDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + diskName := a.volumeName[(strings.LastIndex(a.volumeName, "/") + 1):] + + volSource := v1.VolumeSource{ + AzureDisk: &v1.AzureDiskVolumeSource{ + DiskName: diskName, + DataDiskURI: a.volumeName, + ReadOnly: &readOnly, + }, + } + if fsType != "" { + volSource.AzureDisk.FSType = &fsType + } + return &volSource +} + +func (a *azureDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + diskName := a.volumeName[(strings.LastIndex(a.volumeName, "/") + 1):] + + pvSource := v1.PersistentVolumeSource{ + AzureDisk: &v1.AzureDiskVolumeSource{ + DiskName: diskName, + DataDiskURI: a.volumeName, + ReadOnly: &readOnly, + }, + } + if fsType != "" { + pvSource.AzureDisk.FSType = &fsType + } + return &pvSource +} + +func (a *azureDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { + provisioner := "kubernetes.io/azure-disk" + parameters := map[string]string{} + if fsType != "" { + parameters["fsType"] = fsType + } + ns := a.driverInfo.Framework.Namespace.Name + suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) + + return getStorageClass(provisioner, parameters, nil, ns, suffix) +} + +func (a *azureDriver) CreateDriver() { +} + +func (a *azureDriver) CleanupDriver() { +} + +func (a *azureDriver) CreateVolume(volType testpatterns.TestVolType) { + By("creating a test azure disk volume") + var err error + a.volumeName, err = framework.CreatePDWithRetry() + Expect(err).NotTo(HaveOccurred()) +} + +func (a *azureDriver) DeleteVolume(volType testpatterns.TestVolType) { + framework.DeletePDWithRetry(a.volumeName) +} + +// AWS +type awsDriver struct { + volumeName string + + driverInfo DriverInfo +} + +var _ TestDriver = &awsDriver{} + +// TODO: Fix authorization error in attach operation and uncomment below +//var _ PreprovisionedVolumeTestDriver = &awsDriver{} +//var _ InlineVolumeTestDriver = &awsDriver{} +//var _ PreprovisionedPVTestDriver = &awsDriver{} +var _ DynamicPVTestDriver = &awsDriver{} + +// InitAwsDriver returns awsDriver that implements TestDriver interface +func InitAwsDriver() TestDriver { + return &awsDriver{ + driverInfo: DriverInfo{ + Name: "aws", + MaxFileSize: testpatterns.FileSizeMedium, + SupportedFsType: sets.NewString( + "", // Default fsType + "ext3", + ), + IsPersistent: true, + IsFsGroupSupported: true, + IsBlockSupported: true, + }, + } +} + +func (a *awsDriver) GetDriverInfo() *DriverInfo { + return &a.driverInfo +} + +func (a *awsDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) { + framework.SkipUnlessProviderIs("aws") +} + +// TODO: Fix authorization error in attach operation and uncomment below +/* +func (a *awsDriver) GetVolumeSource(readOnly bool, fsType string) *v1.VolumeSource { + volSource := v1.VolumeSource{ + AWSElasticBlockStore: 
&v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: a.volumeName, + ReadOnly: readOnly, + }, + } + if fsType != "" { + volSource.AWSElasticBlockStore.FSType = fsType + } + return &volSource +} + +func (a *awsDriver) GetPersistentVolumeSource(readOnly bool, fsType string) *v1.PersistentVolumeSource { + pvSource := v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: a.volumeName, + ReadOnly: readOnly, + }, + } + if fsType != "" { + pvSource.AWSElasticBlockStore.FSType = fsType + } + return &pvSource +} +*/ + +func (a *awsDriver) GetDynamicProvisionStorageClass(fsType string) *storagev1.StorageClass { + provisioner := "kubernetes.io/aws-ebs" + parameters := map[string]string{} + if fsType != "" { + parameters["fsType"] = fsType + } + ns := a.driverInfo.Framework.Namespace.Name + suffix := fmt.Sprintf("%s-sc", a.driverInfo.Name) + + return getStorageClass(provisioner, parameters, nil, ns, suffix) +} + +func (a *awsDriver) CreateDriver() { +} + +func (a *awsDriver) CleanupDriver() { +} + +// TODO: Fix authorization error in attach operation and uncomment below +/* +func (a *awsDriver) CreateVolume(volType testpatterns.TestVolType) { + By("creating a test aws volume") + var err error + a.volumeName, err = framework.CreatePDWithRetry() + Expect(err).NotTo(HaveOccurred()) +} + +func (a *awsDriver) DeleteVolume(volType testpatterns.TestVolType) { + framework.DeletePDWithRetry(a.volumeName) +} +*/ diff --git a/test/e2e/storage/ephemeral_volume.go b/test/e2e/storage/ephemeral_volume.go index 5d1948e4b4e..6f1f9966766 100644 --- a/test/e2e/storage/ephemeral_volume.go +++ b/test/e2e/storage/ephemeral_volume.go @@ -27,11 +27,18 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" + imageutils "k8s.io/kubernetes/test/utils/image" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) +var ( + volumePath = "/test-volume" + volumeName = "test-volume" + mountImage = imageutils.GetE2EImage(imageutils.Mounttest) +) + var _ = utils.SIGDescribe("Ephemeralstorage", func() { var ( c clientset.Interface diff --git a/test/e2e/storage/in_tree_volumes.go b/test/e2e/storage/in_tree_volumes.go new file mode 100644 index 00000000000..2e1d78cf1e1 --- /dev/null +++ b/test/e2e/storage/in_tree_volumes.go @@ -0,0 +1,91 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "fmt" + + . 
"github.com/onsi/ginkgo" + "k8s.io/api/core/v1" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + "k8s.io/kubernetes/test/e2e/storage/testsuites" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +// List of testDrivers to be executed in below loop +var testDrivers = []func() drivers.TestDriver{ + drivers.InitNFSDriver, + drivers.InitGlusterFSDriver, + drivers.InitISCSIDriver, + drivers.InitRbdDriver, + drivers.InitCephFSDriver, + drivers.InitHostpathDriver, + drivers.InitHostpathSymlinkDriver, + drivers.InitEmptydirDriver, + drivers.InitCinderDriver, + drivers.InitGceDriver, + drivers.InitVSphereDriver, + drivers.InitAzureDriver, + drivers.InitAwsDriver, +} + +// List of testSuites to be executed in below loop +var testSuites = []func() testsuites.TestSuite{ + testsuites.InitVolumesTestSuite, + testsuites.InitVolumeIOTestSuite, + testsuites.InitVolumeModeTestSuite, + testsuites.InitSubPathTestSuite, +} + +// This executes testSuites for in-tree volumes. +var _ = utils.SIGDescribe("In-tree Volumes", func() { + f := framework.NewDefaultFramework("volumes") + + var ( + ns *v1.Namespace + config framework.VolumeTestConfig + ) + + BeforeEach(func() { + ns = f.Namespace + config = framework.VolumeTestConfig{ + Namespace: ns.Name, + Prefix: "volume", + } + }) + + for _, initDriver := range testDrivers { + curDriver := initDriver() + Context(fmt.Sprintf(drivers.GetDriverNameWithFeatureTags(curDriver)), func() { + driver := curDriver + + BeforeEach(func() { + // setupDriver + drivers.SetCommonDriverParameters(driver, f, config) + driver.CreateDriver() + }) + + AfterEach(func() { + // Cleanup driver + driver.CleanupDriver() + }) + + testsuites.RunTestSuite(f, config, driver, testSuites) + }) + } +}) diff --git a/test/e2e/storage/persistent_volumes-volumemode.go b/test/e2e/storage/persistent_volumes-volumemode.go deleted file mode 100644 index e3bb0441825..00000000000 --- a/test/e2e/storage/persistent_volumes-volumemode.go +++ /dev/null @@ -1,429 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "fmt" - - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" - - "k8s.io/api/core/v1" - storagev1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/storage/utils" -) - -const ( - noProvisioner = "kubernetes.io/no-provisioner" - pvNamePrefix = "pv" -) - -func generateConfigsForStaticProvisionPVTest(scName string, volBindMode storagev1.VolumeBindingMode, - volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass, - framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) { - // StorageClass - scConfig := &storagev1.StorageClass{ - ObjectMeta: metav1.ObjectMeta{ - Name: scName, - }, - Provisioner: noProvisioner, - VolumeBindingMode: &volBindMode, - } - // PV - pvConfig := framework.PersistentVolumeConfig{ - PVSource: pvSource, - NamePrefix: pvNamePrefix, - StorageClassName: scName, - VolumeMode: &volMode, - } - // PVC - pvcConfig := framework.PersistentVolumeClaimConfig{ - AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, - StorageClassName: &scName, - VolumeMode: &volMode, - } - - return scConfig, pvConfig, pvcConfig -} - -func createPVTestResource(cs clientset.Interface, ns string, - scConfig *storagev1.StorageClass, pvConfig framework.PersistentVolumeConfig, - pvcConfig framework.PersistentVolumeClaimConfig) (*storagev1.StorageClass, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { - - By("Creating sc") - sc, err := cs.StorageV1().StorageClasses().Create(scConfig) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - pv, pvc, err := framework.CreatePVPVC(cs, pvConfig, pvcConfig, ns, false) - framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns, pv, pvc)) - - By("Creating a pod") - // TODO(mkimuram): Need to set anti-affinity with storage server pod. - // Otherwise, storage server pod can also be affected on destructive tests. 
- pod, err := framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout) - Expect(err).NotTo(HaveOccurred()) - - return sc, pod, pv, pvc -} - -func createPVTestResourceWithFailure(cs clientset.Interface, ns string, - scConfig *storagev1.StorageClass, pvConfig framework.PersistentVolumeConfig, - pvcConfig framework.PersistentVolumeClaimConfig) (*storagev1.StorageClass, *v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { - - By("Creating sc") - sc, err := cs.StorageV1().StorageClasses().Create(scConfig) - Expect(err).NotTo(HaveOccurred()) - - By("Creating pv and pvc") - pv, pvc, err := framework.CreatePVPVC(cs, pvConfig, pvcConfig, ns, false) - framework.ExpectNoError(err) - framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns, pv, pvc)) - - By("Creating a pod") - pod, err := framework.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{pvc}, false, "", false, false, framework.SELinuxLabel, nil, framework.PodStartTimeout) - Expect(err).To(HaveOccurred()) - - return sc, pod, pv, pvc -} - -func deletePVTestResource(f *framework.Framework, cs clientset.Interface, ns string, sc *storagev1.StorageClass, - pod *v1.Pod, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { - By("Deleting pod") - framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) - - By("Deleting pv and pvc") - errs := framework.PVPVCCleanup(cs, ns, pv, pvc) - if len(errs) > 0 { - framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) - } - - By("Deleting sc") - framework.ExpectNoError(cs.StorageV1().StorageClasses().Delete(sc.Name, nil)) -} - -func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { - if volMode == v1.PersistentVolumeBlock { - // Check if block exists - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path)) - - // Double check that it's not directory - utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1) - } else { - // Check if directory exists - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path)) - - // Double check that it's not block - utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1) - } -} - -func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { - if volMode == v1.PersistentVolumeBlock { - // random -> file1 - utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1") - // file1 -> dev (write to dev) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path)) - // dev -> file2 (read from dev) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path)) - // file1 == file2 (check contents) - utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2") - // Clean up temp files - utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2") - - // Check that writing file to block volume fails - utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1) - } else { - // text -> file1 (write to file) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path)) - // grep file1 (read from file and check contents) - utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' 
%s/file1.txt", path)) - - // Check that writing to directory as block volume fails - utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1) - } -} - -func skipBlockSupportTestIfUnsupported(volMode v1.PersistentVolumeMode, isBlockSupported bool) { - if volMode == v1.PersistentVolumeBlock && !isBlockSupported { - framework.Skipf("Skip assertion for block test for block supported plugin.(Block unsupported)") - } -} - -func skipBlockUnsupportTestUnlessUnspported(volMode v1.PersistentVolumeMode, isBlockSupported bool) { - if !(volMode == v1.PersistentVolumeBlock && !isBlockSupported) { - framework.Skipf("Skip assertion for block test for block unsupported plugin.(Block suppported or FileSystem test)") - } -} - -var _ = utils.SIGDescribe("PersistentVolumes-volumeMode", func() { - f := framework.NewDefaultFramework("pv-volmode") - const ( - pvTestSCPrefix = "pvtest" - ) - - var ( - cs clientset.Interface - ns string - scName string - isBlockSupported bool - serverIP string - secret *v1.Secret - serverPod *v1.Pod - pvSource v1.PersistentVolumeSource - sc *storagev1.StorageClass - pod *v1.Pod - pv *v1.PersistentVolume - pvc *v1.PersistentVolumeClaim - volMode v1.PersistentVolumeMode - volBindMode storagev1.VolumeBindingMode - ) - - BeforeEach(func() { - cs = f.ClientSet - ns = f.Namespace.Name - volBindMode = storagev1.VolumeBindingImmediate - }) - - AssertCreateDeletePodAndReadWriteVolume := func() { - // For block supported plugins - It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { - skipBlockSupportTestIfUnsupported(volMode, isBlockSupported) - - scConfig, pvConfig, pvcConfig := generateConfigsForStaticProvisionPVTest(scName, volBindMode, volMode, pvSource) - sc, pod, pv, pvc = createPVTestResource(cs, ns, scConfig, pvConfig, pvcConfig) - defer deletePVTestResource(f, cs, ns, sc, pod, pv, pvc) - - By("Checking if persistent volume exists as expected volume mode") - checkVolumeModeOfPath(pod, volMode, "/mnt/volume1") - - By("Checking if read/write to persistent volume works properly") - checkReadWriteToPath(pod, volMode, "/mnt/volume1") - }) - - // For block unsupported plugins - It("should fail to create pod by failing to mount volume", func() { - skipBlockUnsupportTestUnlessUnspported(volMode, isBlockSupported) - - scConfig, pvConfig, pvcConfig := generateConfigsForStaticProvisionPVTest(scName, volBindMode, volMode, pvSource) - sc, pod, pv, pvc = createPVTestResourceWithFailure(cs, ns, scConfig, pvConfig, pvcConfig) - deletePVTestResource(f, cs, ns, sc, pod, pv, pvc) - }) - } - - verifyAll := func() { - AssertCreateDeletePodAndReadWriteVolume() - // TODO(mkimuram): Add more tests - } - - Describe("NFS", func() { - const pvTestNFSSCSuffix = "nfs" - - BeforeEach(func() { - isBlockSupported = false - scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestNFSSCSuffix) - _, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{}) - - pvSource = v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: serverIP, - Path: "/", - ReadOnly: false, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete") - }) - - Context("FileSystem volume Test", func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeFilesystem - }) - - verifyAll() - }) - - Context("Block volume Test[Feature:BlockVolume]", 
func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeBlock - }) - - verifyAll() - }) - }) - - Describe("iSCSI [Feature:Volumes]", func() { - const pvTestISCSISCSuffix = "iscsi" - - BeforeEach(func() { - isBlockSupported = true - scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestISCSISCSuffix) - _, serverPod, serverIP = framework.NewISCSIServer(cs, ns) - - pvSource = v1.PersistentVolumeSource{ - ISCSI: &v1.ISCSIPersistentVolumeSource{ - TargetPortal: serverIP + ":3260", - IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", - Lun: 0, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete") - }) - - Context("FileSystem volume Test", func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeFilesystem - }) - - verifyAll() - }) - - Context("Block volume Test[Feature:BlockVolume]", func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeBlock - }) - - verifyAll() - }) - }) - - Describe("Ceph-RBD [Feature:Volumes]", func() { - const pvTestRBDSCSuffix = "rbd" - - BeforeEach(func() { - isBlockSupported = true - scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestRBDSCSuffix) - _, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns) - - framework.Logf("namespace: %v, secret.Name: %v", ns, secret.Name) - pvSource = v1.PersistentVolumeSource{ - RBD: &v1.RBDPersistentVolumeSource{ - CephMonitors: []string{serverIP}, - RBDPool: "rbd", - RBDImage: "foo", - RadosUser: "admin", - SecretRef: &v1.SecretReference{ - Name: secret.Name, - Namespace: ns, - }, - ReadOnly: false, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name) - secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{}) - framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - if secErr != nil || err != nil { - if secErr != nil { - framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr) - } - if err != nil { - framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err) - } - framework.Failf("AfterEach: cleanup failed") - } - }) - - Context("FileSystem volume Test", func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeFilesystem - }) - - verifyAll() - }) - - Context("Block volume Test[Feature:BlockVolume]", func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeBlock - }) - - verifyAll() - }) - }) - - Describe("CephFS [Feature:Volumes]", func() { - const pvTestCephFSSCSuffix = "cephfs" - - BeforeEach(func() { - isBlockSupported = false - scName = fmt.Sprintf("%v-%v-%v", pvTestSCPrefix, ns, pvTestCephFSSCSuffix) - _, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns) - - pvSource = v1.PersistentVolumeSource{ - CephFS: &v1.CephFSPersistentVolumeSource{ - Monitors: []string{serverIP + ":6789"}, - User: "kube", - SecretRef: &v1.SecretReference{ - Name: secret.Name, - Namespace: ns, - }, - ReadOnly: false, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting CephFS server secret %q...", secret.Name) - secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{}) - framework.Logf("AfterEach: deleting CephFS server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - if secErr != nil || err != nil { - if secErr != nil 
{ - framework.Logf("AfterEach: CephFS delete secret failed: %v", secErr) - } - if err != nil { - framework.Logf("AfterEach: CephFS server pod delete failed: %v", err) - } - framework.Failf("AfterEach: cleanup failed") - } - }) - - Context("FileSystem volume Test", func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeFilesystem - }) - - verifyAll() - }) - - Context("Block volume Test[Feature:BlockVolume]", func() { - BeforeEach(func() { - volMode = v1.PersistentVolumeBlock - }) - - verifyAll() - }) - }) - -}) diff --git a/test/e2e/storage/subpath.go b/test/e2e/storage/subpath.go index e6e630a7ff1..44563fe72af 100644 --- a/test/e2e/storage/subpath.go +++ b/test/e2e/storage/subpath.go @@ -17,70 +17,18 @@ limitations under the License. package storage import ( - "fmt" - "path/filepath" - "strings" - "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" - imageutils "k8s.io/kubernetes/test/utils/image" - - "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" ) -var ( - volumePath = "/test-volume" - volumeName = "test-volume" - probeVolumePath = "/probe-volume" - probeFilePath = probeVolumePath + "/probe-file" - fileName = "test-file" - retryDuration = 10 - mountImage = imageutils.GetE2EImage(imageutils.Mounttest) -) - -type volInfo struct { - source *v1.VolumeSource - node string - privilegedSecurityContext bool -} - -type volSource interface { - createVolume(f *framework.Framework) volInfo - cleanupVolume(f *framework.Framework) - getReadOnlyVolumeSpec() *v1.VolumeSource -} - -var initVolSources = map[string]func() volSource{ - "hostPath": initHostpath, - "hostPathSymlink": initHostpathSymlink, - "emptyDir": initEmptydir, - "gcePDPVC": initGCEPDPVC, - "gcePDPartitioned": initGCEPDPartition, - "nfs": initNFS, - "nfsPVC": initNFSPVC, - "gluster": initGluster, -} - var _ = utils.SIGDescribe("Subpath", func() { - var ( - subPath string - subPathDir string - filePathInSubpath string - filePathInVolume string - pod *v1.Pod - vol volSource - ) - f := framework.NewDefaultFramework("subpath") Context("Atomic writer volumes", func() { @@ -109,8 +57,8 @@ var _ = utils.SIGDescribe("Subpath", func() { Description: Containers in a pod can read content from a secret mounted volume which was configured with a subpath. */ framework.ConformanceIt("should support subpaths with secret pod", func() { - pod := testPodSubpath(f, "secret-key", "secret", &v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}, privilegedSecurityContext) - testBasicSubpath(f, "secret-value", pod) + pod := testsuites.TestPodSubpath(f, "secret-key", "secret", &v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}, privilegedSecurityContext) + testsuites.TestBasicSubpath(f, "secret-value", pod) }) /* @@ -119,8 +67,8 @@ var _ = utils.SIGDescribe("Subpath", func() { Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath. 
*/ framework.ConformanceIt("should support subpaths with configmap pod", func() { - pod := testPodSubpath(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) - testBasicSubpath(f, "configmap-value", pod) + pod := testsuites.TestPodSubpath(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) + testsuites.TestBasicSubpath(f, "configmap-value", pod) }) /* @@ -129,10 +77,10 @@ var _ = utils.SIGDescribe("Subpath", func() { Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath and also using a mountpath that is a specific file. */ framework.ConformanceIt("should support subpaths with configmap pod with mountPath of existing file", func() { - pod := testPodSubpath(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) + pod := testsuites.TestPodSubpath(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext) file := "/etc/resolv.conf" pod.Spec.Containers[0].VolumeMounts[0].MountPath = file - testBasicSubpathFile(f, "configmap-value", pod, file) + testsuites.TestBasicSubpathFile(f, "configmap-value", pod, file) }) /* @@ -141,12 +89,12 @@ var _ = utils.SIGDescribe("Subpath", func() { Description: Containers in a pod can read content from a downwardAPI mounted volume which was configured with a subpath. */ framework.ConformanceIt("should support subpaths with downward pod", func() { - pod := testPodSubpath(f, "downward/podname", "downwardAPI", &v1.VolumeSource{ + pod := testsuites.TestPodSubpath(f, "downward/podname", "downwardAPI", &v1.VolumeSource{ DownwardAPI: &v1.DownwardAPIVolumeSource{ Items: []v1.DownwardAPIVolumeFile{{Path: "downward/podname", FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}}, }, }, privilegedSecurityContext) - testBasicSubpath(f, pod.Name, pod) + testsuites.TestBasicSubpath(f, pod.Name, pod) }) /* @@ -155,7 +103,7 @@ var _ = utils.SIGDescribe("Subpath", func() { Description: Containers in a pod can read content from a projected mounted volume which was configured with a subpath. 
*/ framework.ConformanceIt("should support subpaths with projected pod", func() { - pod := testPodSubpath(f, "projected/configmap-key", "projected", &v1.VolumeSource{ + pod := testsuites.TestPodSubpath(f, "projected/configmap-key", "projected", &v1.VolumeSource{ Projected: &v1.ProjectedVolumeSource{ Sources: []v1.VolumeProjection{ {ConfigMap: &v1.ConfigMapProjection{ @@ -165,949 +113,7 @@ var _ = utils.SIGDescribe("Subpath", func() { }, }, }, privilegedSecurityContext) - testBasicSubpath(f, "configmap-value", pod) + testsuites.TestBasicSubpath(f, "configmap-value", pod) }) }) - - for volType, volInit := range initVolSources { - curVolType := volType - curVolInit := volInit - - Context(fmt.Sprintf("[Volume type: %v]", curVolType), func() { - BeforeEach(func() { - By(fmt.Sprintf("Initializing %s volume", curVolType)) - vol = curVolInit() - subPath = f.Namespace.Name - subPathDir = filepath.Join(volumePath, subPath) - filePathInSubpath = filepath.Join(volumePath, fileName) - filePathInVolume = filepath.Join(subPathDir, fileName) - volInfo := vol.createVolume(f) - pod = testPodSubpath(f, subPath, curVolType, volInfo.source, volInfo.privilegedSecurityContext) - pod.Spec.NodeName = volInfo.node - }) - - AfterEach(func() { - By("Deleting pod") - err := framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while deleting pod") - - By("Cleaning up volume") - vol.cleanupVolume(f) - }) - - It("should support non-existent path", func() { - // Write the file in the subPath from container 0 - setWriteCommand(filePathInSubpath, &pod.Spec.Containers[0]) - - // Read it from outside the subPath from container 1 - testReadFile(f, filePathInVolume, pod, 1) - }) - - It("should support existing directory", func() { - // Create the directory - setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir)) - - // Write the file in the subPath from container 0 - setWriteCommand(filePathInSubpath, &pod.Spec.Containers[0]) - - // Read it from outside the subPath from container 1 - testReadFile(f, filePathInVolume, pod, 1) - }) - - It("should support existing single file", func() { - // Create the file in the init container - setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume)) - - // Read it from inside the subPath from container 0 - testReadFile(f, filePathInSubpath, pod, 0) - }) - - It("should support file as subpath", func() { - // Create the file in the init container - setInitCommand(pod, fmt.Sprintf("echo %s > %s", f.Namespace.Name, subPathDir)) - - testBasicSubpath(f, f.Namespace.Name, pod) - }) - - It("should fail if subpath directory is outside the volume [Slow]", func() { - // Create the subpath outside the volume - setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir)) - - // Pod should fail - testPodFailSubpath(f, pod) - }) - - It("should fail if subpath file is outside the volume [Slow]", func() { - // Create the subpath outside the volume - setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir)) - - // Pod should fail - testPodFailSubpath(f, pod) - }) - - It("should fail if non-existent subpath is outside the volume [Slow]", func() { - // Create the subpath outside the volume - setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir)) - - // Pod should fail - testPodFailSubpath(f, pod) - }) - - It("should fail if subpath with backstepping is outside the volume [Slow]", func() { - // Create the subpath outside the volume - setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", 
subPathDir)) - - // Pod should fail - testPodFailSubpath(f, pod) - }) - - It("should support creating multiple subpath from same volumes [Slow]", func() { - subpathDir1 := filepath.Join(volumePath, "subpath1") - subpathDir2 := filepath.Join(volumePath, "subpath2") - filepath1 := filepath.Join("/test-subpath1", fileName) - filepath2 := filepath.Join("/test-subpath2", fileName) - setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2)) - - addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{ - Name: volumeName, - MountPath: "/test-subpath1", - SubPath: "subpath1", - }) - addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{ - Name: volumeName, - MountPath: "/test-subpath2", - SubPath: "subpath2", - }) - - addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2) - testMultipleReads(f, pod, 0, filepath1, filepath2) - }) - - It("should support restarting containers using directory as subpath [Slow]", func() { - // Create the directory - setInitCommand(pod, fmt.Sprintf("mkdir -p %v; touch %v", subPathDir, probeFilePath)) - - testPodContainerRestart(f, pod) - }) - - It("should support restarting containers using file as subpath [Slow]", func() { - // Create the file - setInitCommand(pod, fmt.Sprintf("touch %v; touch %v", subPathDir, probeFilePath)) - - testPodContainerRestart(f, pod) - }) - - It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { - testSubpathReconstruction(f, pod, false) - }) - - It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { - if curVolType == "hostPath" || curVolType == "hostPathSymlink" { - framework.Skipf("%s volume type does not support reconstruction, skipping", curVolType) - } - testSubpathReconstruction(f, pod, true) - }) - - It("should support readOnly directory specified in the volumeMount", func() { - // Create the directory - setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir)) - - // Write the file in the volume from container 1 - setWriteCommand(filePathInVolume, &pod.Spec.Containers[1]) - - // Read it from inside the subPath from container 0 - pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(f, filePathInSubpath, pod, 0) - }) - - It("should support readOnly file specified in the volumeMount", func() { - // Create the file - setInitCommand(pod, fmt.Sprintf("touch %s", subPathDir)) - - // Write the file in the volume from container 1 - setWriteCommand(subPathDir, &pod.Spec.Containers[1]) - - // Read it from inside the subPath from container 0 - pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true - testReadFile(f, volumePath, pod, 0) - }) - - It("should support existing directories when readOnly specified in the volumeSource", func() { - roVol := vol.getReadOnlyVolumeSpec() - if roVol == nil { - framework.Skipf("Volume type %v doesn't support readOnly source", curVolType) - } - - // Initialize content in the volume while it's writable - initVolumeContent(f, pod, filePathInVolume, filePathInSubpath) - - // Set volume source to read only - pod.Spec.Volumes[0].VolumeSource = *roVol - - // Read it from inside the subPath from container 0 - testReadFile(f, filePathInSubpath, pod, 0) - }) - - It("should fail for new directories when readOnly specified in the volumeSource", func() { - roVol := vol.getReadOnlyVolumeSpec() - if roVol == nil { - framework.Skipf("Volume type %v doesn't support readOnly source", curVolType) - } - - // Set volume source to read only - 
pod.Spec.Volumes[0].VolumeSource = *roVol - - // Pod should fail - testPodFailSubpathError(f, pod, "") - }) - }) - } - - // TODO: add a test case for the same disk with two partitions }) - -func testBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) { - testBasicSubpathFile(f, contents, pod, volumePath) -} - -func testBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) { - setReadCommand(filepath, &pod.Spec.Containers[0]) - - By(fmt.Sprintf("Creating pod %s", pod.Name)) - f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents}) - - By(fmt.Sprintf("Deleting pod %s", pod.Name)) - err := framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).NotTo(HaveOccurred(), "while deleting pod") -} - -func testPodSubpath(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod { - var ( - suffix = strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4))) - gracePeriod = int64(1) - probeVolumeName = "liveness-probe-volume" - ) - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("pod-subpath-test-%s", suffix), - Namespace: f.Namespace.Name, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - { - Name: fmt.Sprintf("init-volume-%s", suffix), - Image: imageutils.GetE2EImage(imageutils.BusyBox), - VolumeMounts: []v1.VolumeMount{ - { - Name: volumeName, - MountPath: volumePath, - }, - { - Name: probeVolumeName, - MountPath: probeVolumePath, - }, - }, - SecurityContext: &v1.SecurityContext{ - Privileged: &privilegedSecurityContext, - }, - }, - }, - Containers: []v1.Container{ - { - Name: fmt.Sprintf("test-container-subpath-%s", suffix), - Image: mountImage, - VolumeMounts: []v1.VolumeMount{ - { - Name: volumeName, - MountPath: volumePath, - SubPath: subpath, - }, - { - Name: probeVolumeName, - MountPath: probeVolumePath, - }, - }, - SecurityContext: &v1.SecurityContext{ - Privileged: &privilegedSecurityContext, - }, - }, - { - Name: fmt.Sprintf("test-container-volume-%s", suffix), - Image: mountImage, - VolumeMounts: []v1.VolumeMount{ - { - Name: volumeName, - MountPath: volumePath, - }, - { - Name: probeVolumeName, - MountPath: probeVolumePath, - }, - }, - SecurityContext: &v1.SecurityContext{ - Privileged: &privilegedSecurityContext, - }, - }, - }, - RestartPolicy: v1.RestartPolicyNever, - TerminationGracePeriodSeconds: &gracePeriod, - Volumes: []v1.Volume{ - { - Name: volumeName, - VolumeSource: *source, - }, - { - Name: probeVolumeName, - VolumeSource: v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - }, - }, - SecurityContext: &v1.PodSecurityContext{ - SELinuxOptions: &v1.SELinuxOptions{ - Level: "s0:c0,c1", - }, - }, - }, - } -} - -func clearSubpathPodCommands(pod *v1.Pod) { - pod.Spec.InitContainers[0].Command = nil - pod.Spec.Containers[0].Args = nil - pod.Spec.Containers[1].Args = nil -} - -func setInitCommand(pod *v1.Pod, command string) { - pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command} -} - -func setWriteCommand(file string, container *v1.Container) { - container.Args = []string{ - fmt.Sprintf("--new_file_0644=%v", file), - fmt.Sprintf("--file_mode=%v", file), - } -} - -func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) { - existingMounts := container.VolumeMounts - container.VolumeMounts = append(existingMounts, volumeMount) -} - -func addMultipleWrites(container *v1.Container, file1 string, file2 string) { - container.Args = []string{ - 
fmt.Sprintf("--new_file_0644=%v", file1), - fmt.Sprintf("--new_file_0666=%v", file2), - } -} - -func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) { - By(fmt.Sprintf("Creating pod %s", pod.Name)) - f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{ - "content of file \"" + file1 + "\": mount-tester new file", - "content of file \"" + file2 + "\": mount-tester new file", - }) -} - -func setReadCommand(file string, container *v1.Container) { - container.Args = []string{ - fmt.Sprintf("--file_content_in_loop=%v", file), - fmt.Sprintf("--retry_time=%d", retryDuration), - } -} - -func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) { - setReadCommand(file, &pod.Spec.Containers[containerIndex]) - - By(fmt.Sprintf("Creating pod %s", pod.Name)) - f.TestContainerOutput("subpath", pod, containerIndex, []string{ - "content of file \"" + file + "\": mount-tester new file", - }) - - By(fmt.Sprintf("Deleting pod %s", pod.Name)) - err := framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).NotTo(HaveOccurred(), "while deleting pod") -} - -func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) { - testPodFailSubpathError(f, pod, "subPath") -} - -func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string) { - By(fmt.Sprintf("Creating pod %s", pod.Name)) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating pod") - defer func() { - framework.DeletePodWithWait(f, f.ClientSet, pod) - }() - err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) - Expect(err).To(HaveOccurred(), "while waiting for pod to be running") - - By("Checking for subpath error event") - selector := fields.Set{ - "involvedObject.kind": "Pod", - "involvedObject.name": pod.Name, - "involvedObject.namespace": f.Namespace.Name, - "reason": "Failed", - }.AsSelector().String() - options := metav1.ListOptions{FieldSelector: selector} - events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) - Expect(err).NotTo(HaveOccurred(), "while getting pod events") - Expect(len(events.Items)).NotTo(Equal(0), "no events found") - Expect(events.Items[0].Message).To(ContainSubstring(errorMsg), fmt.Sprintf("%q error not found", errorMsg)) -} - -// Tests that the existing subpath mount is detected when a container restarts -func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { - pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure - - pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox) - pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"} - pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) - pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} - - // Add liveness probe to subpath container - pod.Spec.Containers[0].LivenessProbe = &v1.Probe{ - Handler: v1.Handler{ - Exec: &v1.ExecAction{ - Command: []string{"cat", probeFilePath}, - }, - }, - InitialDelaySeconds: 1, - FailureThreshold: 1, - PeriodSeconds: 2, - } - - // Start pod - By(fmt.Sprintf("Creating pod %s", pod.Name)) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating pod") - - err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") - - By("Failing liveness probe") - out, err := podContainerExec(pod, 1, 
fmt.Sprintf("rm %v", probeFilePath)) - framework.Logf("Pod exec output: %v", out) - Expect(err).ToNot(HaveOccurred(), "while failing liveness probe") - - // Check that container has restarted - By("Waiting for container to restart") - restarts := int32(0) - err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - for _, status := range pod.Status.ContainerStatuses { - if status.Name == pod.Spec.Containers[0].Name { - framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount) - restarts = status.RestartCount - if restarts > 0 { - framework.Logf("Container has restart count: %v", restarts) - return true, nil - } - } - } - return false, nil - }) - Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart") - - // Fix liveness probe - By("Rewriting the file") - writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath) - out, err = podContainerExec(pod, 1, writeCmd) - framework.Logf("Pod exec output: %v", out) - Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file") - - // Wait for container restarts to stabilize - By("Waiting for container to stop restarting") - stableCount := int(0) - stableThreshold := int(time.Minute / framework.Poll) - err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) { - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) - if err != nil { - return false, err - } - for _, status := range pod.Status.ContainerStatuses { - if status.Name == pod.Spec.Containers[0].Name { - if status.RestartCount == restarts { - stableCount++ - if stableCount > stableThreshold { - framework.Logf("Container restart has stabilized") - return true, nil - } - } else { - restarts = status.RestartCount - stableCount = 0 - framework.Logf("Container has restart count: %v", restarts) - } - break - } - } - return false, nil - }) - Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize") -} - -func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) { - // This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption() - - // Change to busybox - pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox) - pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"} - pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) - pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} - - // If grace period is too short, then there is not enough time for the volume - // manager to cleanup the volumes - gracePeriod := int64(30) - pod.Spec.TerminationGracePeriodSeconds = &gracePeriod - - By(fmt.Sprintf("Creating pod %s", pod.Name)) - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating pod") - - err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") - - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) - Expect(err).ToNot(HaveOccurred(), "while getting pod") - - utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true) -} - -func initVolumeContent(f *framework.Framework, pod *v1.Pod, volumeFilepath, subpathFilepath string) { - setWriteCommand(volumeFilepath, &pod.Spec.Containers[1]) - 
setReadCommand(subpathFilepath, &pod.Spec.Containers[0]) - - By(fmt.Sprintf("Creating pod to write volume content %s", pod.Name)) - f.TestContainerOutput("subpath", pod, 0, []string{ - "content of file \"" + subpathFilepath + "\": mount-tester new file", - }) - - By(fmt.Sprintf("Deleting pod %s", pod.Name)) - err := framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).NotTo(HaveOccurred(), "while deleting pod") - - // This pod spec is going to be reused; reset all the commands - clearSubpathPodCommands(pod) -} - -func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) { - return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec) -} - -type hostpathSource struct { -} - -func initHostpath() volSource { - return &hostpathSource{} -} - -func (s *hostpathSource) createVolume(f *framework.Framework) volInfo { - return volInfo{ - source: &v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: "/tmp", - }, - }, - privilegedSecurityContext: true, - } -} - -func (s *hostpathSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return nil -} - -func (s *hostpathSource) cleanupVolume(f *framework.Framework) { -} - -type hostpathSymlinkSource struct { -} - -func initHostpathSymlink() volSource { - return &hostpathSymlinkSource{} -} - -func (s *hostpathSymlinkSource) createVolume(f *framework.Framework) volInfo { - nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling") - - node0 := &nodes.Items[0] - sourcePath := fmt.Sprintf("/tmp/%v", f.Namespace.Name) - targetPath := fmt.Sprintf("/tmp/%v-link", f.Namespace.Name) - cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath) - privileged := true - - // Launch pod to initialize hostpath directory and symlink - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("hostpath-symlink-prep-%s", f.Namespace.Name), - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name), - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{"/bin/sh", "-ec", cmd}, - VolumeMounts: []v1.VolumeMount{ - { - Name: volumeName, - MountPath: "/tmp", - }, - }, - SecurityContext: &v1.SecurityContext{ - Privileged: &privileged, - }, - }, - }, - RestartPolicy: v1.RestartPolicyNever, - Volumes: []v1.Volume{ - { - Name: volumeName, - VolumeSource: v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: "/tmp", - }, - }, - }, - }, - NodeName: node0.Name, - }, - } - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating hostpath init pod") - - err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) - Expect(err).ToNot(HaveOccurred(), "while waiting for hostpath init pod to succeed") - - err = framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while deleting hostpath init pod") - - return volInfo{ - source: &v1.VolumeSource{ - HostPath: &v1.HostPathVolumeSource{ - Path: targetPath, - }, - }, - node: node0.Name, - privilegedSecurityContext: privileged, - } -} - -func (s *hostpathSymlinkSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return nil -} - -func (s *hostpathSymlinkSource) cleanupVolume(f *framework.Framework) { -} - -type emptydirSource struct { -} - -func initEmptydir() 
volSource { - return &emptydirSource{} -} - -func (s *emptydirSource) createVolume(f *framework.Framework) volInfo { - return volInfo{ - source: &v1.VolumeSource{ - EmptyDir: &v1.EmptyDirVolumeSource{}, - }, - privilegedSecurityContext: true, - } -} - -func (s *emptydirSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return nil -} - -func (s *emptydirSource) cleanupVolume(f *framework.Framework) { -} - -type gcepdPVCSource struct { - pvc *v1.PersistentVolumeClaim -} - -func initGCEPDPVC() volSource { - framework.SkipUnlessProviderIs("gce", "gke") - return &gcepdPVCSource{} -} - -func (s *gcepdPVCSource) createVolume(f *framework.Framework) volInfo { - var err error - - framework.Logf("Creating GCE PD volume via dynamic provisioning") - testCase := storageClassTest{ - name: "subpath", - claimSize: "2G", - } - - pvc := newClaim(testCase, f.Namespace.Name, "subpath") - s.pvc, err = framework.CreatePVC(f.ClientSet, f.Namespace.Name, pvc) - framework.ExpectNoError(err, "Error creating PVC") - - // Launch pod to format the PD first - pod := &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("gcepd-prep-%s", f.Namespace.Name), - }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name), - Image: imageutils.GetE2EImage(imageutils.BusyBox), - Command: []string{"/bin/sh", "-ec", "echo nothing"}, - VolumeMounts: []v1.VolumeMount{ - { - Name: volumeName, - MountPath: "/vol", - }, - }, - }, - }, - RestartPolicy: v1.RestartPolicyNever, - Volumes: []v1.Volume{ - { - Name: volumeName, - VolumeSource: v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.pvc.Name, - }, - }, - }, - }, - }, - } - pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) - Expect(err).ToNot(HaveOccurred(), "while creating gce pd init pod") - - err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) - Expect(err).ToNot(HaveOccurred(), "while waiting for gce pd init pod to succeed") - - err = framework.DeletePodWithWait(f, f.ClientSet, pod) - Expect(err).ToNot(HaveOccurred(), "while deleting gce pd init pod") - - return volInfo{ - source: &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.pvc.Name, - }, - }, - privilegedSecurityContext: true, - } -} - -func (s *gcepdPVCSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.pvc.Name, - ReadOnly: true, - }, - } -} - -func (s *gcepdPVCSource) cleanupVolume(f *framework.Framework) { - if s.pvc != nil { - err := f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(s.pvc.Name, nil) - framework.ExpectNoError(err, "Error deleting PVC") - } -} - -type gcepdPartitionSource struct { - diskName string -} - -func initGCEPDPartition() volSource { - // Need to manually create, attach, partition, detach the GCE PD - // with disk name "subpath-partitioned-disk" before running this test - manual := true - if manual { - framework.Skipf("Skipping manual GCE PD partition test") - } - framework.SkipUnlessProviderIs("gce", "gke") - return &gcepdPartitionSource{diskName: "subpath-partitioned-disk"} -} - -func (s *gcepdPartitionSource) createVolume(f *framework.Framework) volInfo { - // TODO: automate partitioned of GCE PD once it supports raw block volumes - // framework.Logf("Creating GCE PD volume") - // s.diskName, err = framework.CreatePDWithRetry() - // framework.ExpectNoError(err, "Error 
creating PD") - - return volInfo{ - source: &v1.VolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ - PDName: s.diskName, - Partition: 1, - }, - }, - privilegedSecurityContext: true, - } -} - -func (s *gcepdPartitionSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return nil -} - -func (s *gcepdPartitionSource) cleanupVolume(f *framework.Framework) { - if s.diskName != "" { - // err := framework.DeletePDWithRetry(s.diskName) - // framework.ExpectNoError(err, "Error deleting PD") - } -} - -type nfsSource struct { - serverPod *v1.Pod - serverIP string -} - -func initNFS() volSource { - return &nfsSource{} -} - -func (s *nfsSource) createVolume(f *framework.Framework) volInfo { - framework.Logf("Creating NFS server") - _, s.serverPod, s.serverIP = framework.NewNFSServer(f.ClientSet, f.Namespace.Name, []string{"-G", "777", "/exports"}) - - return volInfo{ - source: &v1.VolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: s.serverIP, - Path: "/exports", - }, - }, - privilegedSecurityContext: true, - } -} - -func (s *nfsSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return &v1.VolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: s.serverIP, - Path: "/exports", - ReadOnly: true, - }, - } -} - -func (s *nfsSource) cleanupVolume(f *framework.Framework) { - if s.serverPod != nil { - framework.DeletePodWithWait(f, f.ClientSet, s.serverPod) - } -} - -type glusterSource struct { - serverPod *v1.Pod -} - -func initGluster() volSource { - framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") - return &glusterSource{} -} - -func (s *glusterSource) createVolume(f *framework.Framework) volInfo { - framework.Logf("Creating GlusterFS server") - _, s.serverPod, _ = framework.NewGlusterfsServer(f.ClientSet, f.Namespace.Name) - - return volInfo{ - source: &v1.VolumeSource{ - Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: "gluster-server", - Path: "test_vol", - }, - }, - privilegedSecurityContext: true, - } -} - -func (s *glusterSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return &v1.VolumeSource{ - Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: "gluster-server", - Path: "test_vol", - ReadOnly: true, - }, - } -} - -func (s *glusterSource) cleanupVolume(f *framework.Framework) { - if s.serverPod != nil { - framework.DeletePodWithWait(f, f.ClientSet, s.serverPod) - err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Delete("gluster-server", nil) - Expect(err).NotTo(HaveOccurred(), "Gluster delete endpoints failed") - } -} - -// TODO: need a better way to wrap PVC. A generic framework should support both static and dynamic PV. 
-// For static PV, can reuse createVolume methods for inline volumes -type nfsPVCSource struct { - serverPod *v1.Pod - pvc *v1.PersistentVolumeClaim - pv *v1.PersistentVolume -} - -func initNFSPVC() volSource { - return &nfsPVCSource{} -} - -func (s *nfsPVCSource) createVolume(f *framework.Framework) volInfo { - var serverIP string - - framework.Logf("Creating NFS server") - _, s.serverPod, serverIP = framework.NewNFSServer(f.ClientSet, f.Namespace.Name, []string{"-G", "777", "/exports"}) - - pvConfig := framework.PersistentVolumeConfig{ - NamePrefix: "nfs-", - StorageClassName: f.Namespace.Name, - PVSource: v1.PersistentVolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: serverIP, - Path: "/exports", - }, - }, - } - pvcConfig := framework.PersistentVolumeClaimConfig{ - StorageClassName: &f.Namespace.Name, - } - - framework.Logf("Creating PVC and PV") - pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false) - Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed") - - err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc) - Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind") - - s.pvc = pvc - s.pv = pv - - return volInfo{ - source: &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: pvc.Name, - }, - }, - privilegedSecurityContext: true, - } -} - -func (s *nfsPVCSource) getReadOnlyVolumeSpec() *v1.VolumeSource { - return &v1.VolumeSource{ - PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ - ClaimName: s.pvc.Name, - ReadOnly: true, - }, - } -} - -func (s *nfsPVCSource) cleanupVolume(f *framework.Framework) { - if s.pvc != nil || s.pv != nil { - if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, s.pv, s.pvc); len(errs) != 0 { - framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) - } - } - if s.serverPod != nil { - framework.DeletePodWithWait(f, f.ClientSet, s.serverPod) - } -} diff --git a/test/e2e/storage/testpatterns/BUILD b/test/e2e/storage/testpatterns/BUILD new file mode 100644 index 00000000000..76a4a753cba --- /dev/null +++ b/test/e2e/storage/testpatterns/BUILD @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["testpattern.go"], + importpath = "k8s.io/kubernetes/test/e2e/storage/testpatterns", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//test/e2e/framework:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/storage/testpatterns/testpattern.go b/test/e2e/storage/testpatterns/testpattern.go new file mode 100644 index 00000000000..26b311697f9 --- /dev/null +++ b/test/e2e/storage/testpatterns/testpattern.go @@ -0,0 +1,168 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testpatterns + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + // MinFileSize represents minimum file size (1 MiB) for testing + MinFileSize = 1 * framework.MiB + + // FileSizeSmall represents small file size (1 MiB) for testing + FileSizeSmall = 1 * framework.MiB + // FileSizeMedium represents medium file size (100 MiB) for testing + FileSizeMedium = 100 * framework.MiB + // FileSizeLarge represents large file size (1 GiB) for testing + FileSizeLarge = 1 * framework.GiB +) + +// TestVolType represents a volume type to be tested in a TestSuite +type TestVolType string + +var ( + // InlineVolume represents a volume type that is used inline in volumeSource + InlineVolume TestVolType = "InlineVolume" + // PreprovisionedPV represents a volume type for pre-provisioned Persistent Volume + PreprovisionedPV TestVolType = "PreprovisionedPV" + // DynamicPV represents a volume type for dynamic provisioned Persistent Volume + DynamicPV TestVolType = "DynamicPV" +) + +// TestPattern represents a combination of parameters to be tested in a TestSuite +type TestPattern struct { + Name string // Name of TestPattern + FeatureTag string // featureTag for the TestSuite + VolType TestVolType // Volume type of the volume + FsType string // Fstype of the volume + VolMode v1.PersistentVolumeMode // PersistentVolumeMode of the volume +} + +var ( + // Definitions for default fsType + + // DefaultFsInlineVolume is TestPattern for "Inline-volume (default fs)" + DefaultFsInlineVolume = TestPattern{ + Name: "Inline-volume (default fs)", + VolType: InlineVolume, + } + // DefaultFsPreprovisionedPV is TestPattern for "Pre-provisioned PV (default fs)" + DefaultFsPreprovisionedPV = TestPattern{ + Name: "Pre-provisioned PV (default fs)", + VolType: PreprovisionedPV, + } + // DefaultFsDynamicPV is TestPattern for "Dynamic PV (default fs)" + DefaultFsDynamicPV = TestPattern{ + Name: "Dynamic PV (default fs)", + VolType: DynamicPV, + } + + // Definitions for ext3 + + // Ext3InlineVolume is TestPattern for "Inline-volume (ext3)" + Ext3InlineVolume = TestPattern{ + Name: "Inline-volume (ext3)", + VolType: InlineVolume, + FsType: "ext3", + } + // Ext3PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext3)" + Ext3PreprovisionedPV = TestPattern{ + Name: "Pre-provisioned PV (ext3)", + VolType: PreprovisionedPV, + FsType: "ext3", + } + // Ext3DynamicPV is TestPattern for "Dynamic PV (ext3)" + Ext3DynamicPV = TestPattern{ + Name: "Dynamic PV (ext3)", + VolType: DynamicPV, + FsType: "ext3", + } + + // Definitions for ext4 + + // Ext4InlineVolume is TestPattern for "Inline-volume (ext4)" + Ext4InlineVolume = TestPattern{ + Name: "Inline-volume (ext4)", + VolType: InlineVolume, + FsType: "ext4", + } + // Ext4PreprovisionedPV is TestPattern for "Pre-provisioned PV (ext4)" + Ext4PreprovisionedPV = TestPattern{ + Name: "Pre-provisioned PV (ext4)", + VolType: PreprovisionedPV, + FsType: "ext4", + } + // Ext4DynamicPV is TestPattern for "Dynamic PV (ext4)" + Ext4DynamicPV = TestPattern{ + Name: "Dynamic PV (ext4)", + VolType: DynamicPV, + FsType: "ext4", + } + + // Definitions for xfs + + // XfsInlineVolume is TestPattern for "Inline-volume (xfs)" + XfsInlineVolume = TestPattern{ + Name: "Inline-volume (xfs)", + VolType: InlineVolume, + FsType: "xfs", + } + // XfsPreprovisionedPV is TestPattern for "Pre-provisioned PV (xfs)" + XfsPreprovisionedPV = TestPattern{ + Name: 
"Pre-provisioned PV (xfs)", + VolType: PreprovisionedPV, + FsType: "xfs", + } + // XfsDynamicPV is TestPattern for "Dynamic PV (xfs)" + XfsDynamicPV = TestPattern{ + Name: "Dynamic PV (xfs)", + VolType: DynamicPV, + FsType: "xfs", + } + + // Definitions for Filesystem volume mode + + // FsVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (filesystem)" + FsVolModePreprovisionedPV = TestPattern{ + Name: "Pre-provisioned PV (filesystem volmode)", + VolType: PreprovisionedPV, + VolMode: v1.PersistentVolumeFilesystem, + } + // FsVolModeDynamicPV is TestPattern for "Dynamic PV (filesystem)" + FsVolModeDynamicPV = TestPattern{ + Name: "Dynamic PV (filesystem volmode)", + VolType: DynamicPV, + VolMode: v1.PersistentVolumeFilesystem, + } + + // Definitions for block volume mode + + // BlockVolModePreprovisionedPV is TestPattern for "Pre-provisioned PV (block)" + BlockVolModePreprovisionedPV = TestPattern{ + Name: "Pre-provisioned PV (block volmode)", + VolType: PreprovisionedPV, + VolMode: v1.PersistentVolumeBlock, + } + // BlockVolModeDynamicPV is TestPattern for "Dynamic PV (block)(immediate bind)" + BlockVolModeDynamicPV = TestPattern{ + Name: "Dynamic PV (block volmode)", + VolType: DynamicPV, + VolMode: v1.PersistentVolumeBlock, + } +) diff --git a/test/e2e/storage/testsuites/BUILD b/test/e2e/storage/testsuites/BUILD new file mode 100644 index 00000000000..e32594b8714 --- /dev/null +++ b/test/e2e/storage/testsuites/BUILD @@ -0,0 +1,47 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = [ + "base.go", + "subpath.go", + "volume_io.go", + "volumemode.go", + "volumes.go", + ], + importpath = "k8s.io/kubernetes/test/e2e/storage/testsuites", + visibility = ["//visibility:public"], + deps = [ + "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/api/storage/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", + "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//test/e2e/framework:go_default_library", + "//test/e2e/storage/drivers:go_default_library", + "//test/e2e/storage/testpatterns:go_default_library", + "//test/e2e/storage/utils:go_default_library", + "//test/utils/image:go_default_library", + "//vendor/github.com/onsi/ginkgo:go_default_library", + "//vendor/github.com/onsi/gomega:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go new file mode 100644 index 00000000000..50b3a33ff1b --- /dev/null +++ b/test/e2e/storage/testsuites/base.go @@ -0,0 +1,306 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + "k8s.io/kubernetes/test/e2e/storage/testpatterns" +) + +// TestSuite represents an interface for a set of tests which works with a TestDriver +type TestSuite interface { + // getTestSuiteInfo returns the TestSuiteInfo for this TestSuite + getTestSuiteInfo() TestSuiteInfo + // skipUnsupportedTest skips the test if this TestSuite is not suitable to be tested with the combination of TestPattern and TestDriver + skipUnsupportedTest(testpatterns.TestPattern, drivers.TestDriver) + // execTest executes the test of the testpattern for the driver + execTest(drivers.TestDriver, testpatterns.TestPattern) +} + +type TestSuiteInfo struct { + name string // name of the TestSuite + featureTag string // featureTag for the TestSuite + testPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite +} + +// TestResource represents an interface for resources that are used by a TestSuite +type TestResource interface { + // setupResource sets up test resources to be used for the tests with the + // combination of TestDriver and TestPattern + setupResource(drivers.TestDriver, testpatterns.TestPattern) + // cleanupResource cleans up the test resources created in setupResource + cleanupResource(drivers.TestDriver, testpatterns.TestPattern) +} + +func getTestNameStr(suite TestSuite, pattern testpatterns.TestPattern) string { + tsInfo := suite.getTestSuiteInfo() + return fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.name, tsInfo.featureTag) +} + +// RunTestSuite runs all testpatterns of all testSuites for a driver +func RunTestSuite(f *framework.Framework, config framework.VolumeTestConfig, driver drivers.TestDriver, tsInits []func() TestSuite) { + for _, testSuiteInit := range tsInits { + suite := testSuiteInit() + tsInfo := suite.getTestSuiteInfo() + + for _, pattern := range tsInfo.testPatterns { + suite.execTest(driver, pattern) + } + } +} + +// skipUnsupportedTest will skip tests if the combination of driver, testsuite, and testpattern +// is not suitable to be tested. +// Whether it needs to be skipped is checked by the following steps: +// 1. Check whether volType is supported by the driver from its interface +// 2. Check if fsType is supported by driver +// 3. Check with driver specific logic +// 4. Check with testSuite specific logic +func skipUnsupportedTest(suite TestSuite, driver drivers.TestDriver, pattern testpatterns.TestPattern) { + dInfo := driver.GetDriverInfo() + + // 1. Check whether volType is supported by the driver from its interface + var isSupported bool + switch pattern.VolType { + case testpatterns.InlineVolume: + _, isSupported = driver.(drivers.InlineVolumeTestDriver) + case testpatterns.PreprovisionedPV: + _, isSupported = driver.(drivers.PreprovisionedPVTestDriver) + case testpatterns.DynamicPV: + _, isSupported = driver.(drivers.DynamicPVTestDriver) + default: + isSupported = false + } + + if !isSupported { + framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType) + } + + // 2. Check if fsType is supported by driver + if !dInfo.SupportedFsType.Has(pattern.FsType) { + framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType) + } + + // 3. Check with driver specific logic + driver.SkipUnsupportedTest(pattern) + + // 4. Check with testSuite specific logic + suite.skipUnsupportedTest(pattern, driver) +} + +// genericVolumeTestResource is a generic implementation of TestResource that will be able to +// be used in most of the TestSuites. +// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource. +// Also, see subpath.go in the same directory for how to extend and use it. +type genericVolumeTestResource struct { + driver drivers.TestDriver + volType string + volSource *v1.VolumeSource + pvc *v1.PersistentVolumeClaim + pv *v1.PersistentVolume + sc *storagev1.StorageClass +} + +var _ TestResource = &genericVolumeTestResource{} + +// setupResource sets up genericVolumeTestResource +func (r *genericVolumeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + r.driver = driver + dInfo := driver.GetDriverInfo() + f := dInfo.Framework + cs := f.ClientSet + fsType := pattern.FsType + volType := pattern.VolType + + // Create volume for pre-provisioned volume tests + drivers.CreateVolume(driver, volType) + + switch volType { + case testpatterns.InlineVolume: + framework.Logf("Creating resource for inline volume") + if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok { + r.volSource = iDriver.GetVolumeSource(false, fsType) + r.volType = dInfo.Name + } + case testpatterns.PreprovisionedPV: + framework.Logf("Creating resource for pre-provisioned PV") + if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok { + pvSource := pDriver.GetPersistentVolumeSource(false, fsType) + if pvSource != nil { + r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPV(f, dInfo.Name, pvSource, false) + } + r.volType = fmt.Sprintf("%s-preprovisionedPV", dInfo.Name) + } + case testpatterns.DynamicPV: + framework.Logf("Creating resource for dynamic PV") + if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok { + claimSize := "2Gi" + r.sc = dDriver.GetDynamicProvisionStorageClass(fsType) + + By("creating a StorageClass " + r.sc.Name) + var err error + r.sc, err = cs.StorageV1().StorageClasses().Create(r.sc) + Expect(err).NotTo(HaveOccurred()) + + if r.sc != nil { + r.volSource, r.pv, r.pvc = createVolumeSourceWithPVCPVFromDynamicProvisionSC( + f, dInfo.Name, claimSize, r.sc, false, nil) + } + r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name) + } + default: + framework.Failf("genericVolumeTestResource doesn't support: %s", volType) + } + + if r.volSource == nil { + framework.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, volType) + } +} + +// cleanupResource cleans up genericVolumeTestResource +func (r *genericVolumeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + dInfo
:= driver.GetDriverInfo() + f := dInfo.Framework + volType := pattern.VolType + + if r.pvc != nil || r.pv != nil { + By("Deleting pv and pvc") + if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 { + framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) + } + } + + if r.sc != nil { + By("Deleting sc") + deleteStorageClass(f.ClientSet, r.sc.Name) + } + + // Cleanup volume for pre-provisioned volume tests + drivers.DeleteVolume(driver, volType) +} + +func createVolumeSourceWithPVCPV( + f *framework.Framework, + name string, + pvSource *v1.PersistentVolumeSource, + readOnly bool, +) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { + pvConfig := framework.PersistentVolumeConfig{ + NamePrefix: fmt.Sprintf("%s-", name), + StorageClassName: f.Namespace.Name, + PVSource: *pvSource, + } + pvcConfig := framework.PersistentVolumeClaimConfig{ + StorageClassName: &f.Namespace.Name, + } + + framework.Logf("Creating PVC and PV") + pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false) + Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed") + + err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc) + Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind") + + volSource := &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + ReadOnly: readOnly, + }, + } + return volSource, pv, pvc +} + +func createVolumeSourceWithPVCPVFromDynamicProvisionSC( + f *framework.Framework, + name string, + claimSize string, + sc *storagev1.StorageClass, + readOnly bool, + volMode *v1.PersistentVolumeMode, +) (*v1.VolumeSource, *v1.PersistentVolume, *v1.PersistentVolumeClaim) { + cs := f.ClientSet + ns := f.Namespace.Name + + By("creating a claim") + pvc := getClaim(claimSize, ns) + pvc.Spec.StorageClassName = &sc.Name + if volMode != nil { + pvc.Spec.VolumeMode = volMode + } + + var err error + pvc, err = cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred()) + + pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + pv, err := cs.CoreV1().PersistentVolumes().Get(pvc.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + volSource := &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + ReadOnly: readOnly, + }, + } + return volSource, pv, pvc +} + +func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim { + claim := v1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "pvc-", + Namespace: ns, + }, + Spec: v1.PersistentVolumeClaimSpec{ + AccessModes: []v1.PersistentVolumeAccessMode{ + v1.ReadWriteOnce, + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize), + }, + }, + }, + } + + return &claim +} + +// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found" +func deleteStorageClass(cs clientset.Interface, className string) { + err := cs.StorageV1().StorageClasses().Delete(className, nil) + if err != nil && !apierrs.IsNotFound(err) { + Expect(err).NotTo(HaveOccurred()) + } +} diff --git 
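To show how the pieces of base.go are meant to compose, here is a hedged sketch of how a driver's test file might register suites through RunTestSuite. It assumes it lives in this testsuites package; the constructors it lists (InitSubPathTestSuite, InitVolumeIOTestSuite, InitVolumeModeTestSuite, InitVolumesTestSuite) are the ones added later in this diff, while the function name runAllSuitesForDriver and the overall wiring are illustrative only, not part of the change.

// Hypothetical wiring, assumed to live in package testsuites.
var suitesToRun = []func() TestSuite{
	InitVolumesTestSuite,
	InitVolumeIOTestSuite,
	InitVolumeModeTestSuite,
	InitSubPathTestSuite,
}

// runAllSuitesForDriver expands every suite into one Ginkgo Context per
// testpatterns.TestPattern the suite declares in its TestSuiteInfo; combinations the
// driver cannot serve are skipped at runtime by skipUnsupportedTest rather than
// being filtered out here.
func runAllSuitesForDriver(f *framework.Framework, config framework.VolumeTestConfig, driver drivers.TestDriver) {
	RunTestSuite(f, config, driver, suitesToRun)
}
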
a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go new file mode 100644 index 00000000000..ade96f60ee7 --- /dev/null +++ b/test/e2e/storage/testsuites/subpath.go @@ -0,0 +1,747 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "fmt" + "path/filepath" + "strings" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + "k8s.io/kubernetes/test/e2e/storage/testpatterns" + "k8s.io/kubernetes/test/e2e/storage/utils" + imageutils "k8s.io/kubernetes/test/utils/image" + + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" +) + +var ( + volumePath = "/test-volume" + volumeName = "test-volume" + probeVolumePath = "/probe-volume" + probeFilePath = probeVolumePath + "/probe-file" + fileName = "test-file" + retryDuration = 20 + mountImage = imageutils.GetE2EImage(imageutils.Mounttest) +) + +type subPathTestSuite struct { + tsInfo TestSuiteInfo +} + +var _ TestSuite = &subPathTestSuite{} + +// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface +func InitSubPathTestSuite() TestSuite { + return &subPathTestSuite{ + tsInfo: TestSuiteInfo{ + name: "subPath", + testPatterns: []testpatterns.TestPattern{ + testpatterns.DefaultFsInlineVolume, + testpatterns.DefaultFsPreprovisionedPV, + testpatterns.DefaultFsDynamicPV, + }, + }, + } +} + +func (s *subPathTestSuite) getTestSuiteInfo() TestSuiteInfo { + return s.tsInfo +} + +func (s *subPathTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) { +} + +func createSubPathTestInput(pattern testpatterns.TestPattern, resource subPathTestResource) subPathTestInput { + driver := resource.driver + dInfo := driver.GetDriverInfo() + f := dInfo.Framework + subPath := f.Namespace.Name + subPathDir := filepath.Join(volumePath, subPath) + + return subPathTestInput{ + f: f, + subPathDir: subPathDir, + filePathInSubpath: filepath.Join(volumePath, fileName), + filePathInVolume: filepath.Join(subPathDir, fileName), + volType: resource.volType, + pod: resource.pod, + volSource: resource.genericVolumeTestResource.volSource, + roVol: resource.roVolSource, + } +} + +func (s *subPathTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + Context(getTestNameStr(s, pattern), func() { + var ( + resource subPathTestResource + input subPathTestInput + needsCleanup bool + ) + + BeforeEach(func() { + needsCleanup = false + // Skip unsupported tests to avoid unnecessary resource initialization + skipUnsupportedTest(s, driver, pattern) + needsCleanup = true + + // Setup test resource for driver and testpattern + resource := subPathTestResource{} + resource.setupResource(driver, pattern) + + // Create test input + input = createSubPathTestInput(pattern, resource) + }) + + 
AfterEach(func() { + if needsCleanup { + resource.cleanupResource(driver, pattern) + } + }) + + testSubPath(&input) + }) +} + +type subPathTestResource struct { + genericVolumeTestResource + + roVolSource *v1.VolumeSource + pod *v1.Pod +} + +var _ TestResource = &subPathTestResource{} + +func (s *subPathTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + s.driver = driver + dInfo := s.driver.GetDriverInfo() + f := dInfo.Framework + fsType := pattern.FsType + volType := pattern.VolType + + // Setup generic test resource + s.genericVolumeTestResource.setupResource(driver, pattern) + + // Setup subPath test dependent resource + switch volType { + case testpatterns.InlineVolume: + if iDriver, ok := driver.(drivers.InlineVolumeTestDriver); ok { + s.roVolSource = iDriver.GetVolumeSource(true, fsType) + } + case testpatterns.PreprovisionedPV: + s.roVolSource = &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: s.genericVolumeTestResource.pvc.Name, + ReadOnly: true, + }, + } + case testpatterns.DynamicPV: + s.roVolSource = &v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: s.genericVolumeTestResource.pvc.Name, + ReadOnly: true, + }, + } + default: + framework.Failf("SubPath test doesn't support: %s", volType) + } + + subPath := f.Namespace.Name + config := dInfo.Config + s.pod = TestPodSubpath(f, subPath, s.volType, s.volSource, true) + s.pod.Spec.NodeName = config.ClientNodeName + s.pod.Spec.NodeSelector = config.NodeSelector +} + +func (s *subPathTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + dInfo := driver.GetDriverInfo() + f := dInfo.Framework + + // Cleanup subPath test dependent resource + By("Deleting pod") + err := framework.DeletePodWithWait(f, f.ClientSet, s.pod) + Expect(err).ToNot(HaveOccurred(), "while deleting pod") + + // Cleanup generic test resource + s.genericVolumeTestResource.cleanupResource(driver, pattern) +} + +type subPathTestInput struct { + f *framework.Framework + subPathDir string + filePathInSubpath string + filePathInVolume string + volType string + pod *v1.Pod + volSource *v1.VolumeSource + roVol *v1.VolumeSource +} + +func testSubPath(input *subPathTestInput) { + It("should support non-existent path", func() { + // Write the file in the subPath from container 0 + setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0]) + + // Read it from outside the subPath from container 1 + testReadFile(input.f, input.filePathInVolume, input.pod, 1) + }) + + It("should support existing directory", func() { + // Create the directory + setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + + // Write the file in the subPath from container 0 + setWriteCommand(input.filePathInSubpath, &input.pod.Spec.Containers[0]) + + // Read it from outside the subPath from container 1 + testReadFile(input.f, input.filePathInVolume, input.pod, 1) + }) + + It("should support existing single file", func() { + // Create the file in the init container + setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", input.subPathDir, input.filePathInVolume)) + + // Read it from inside the subPath from container 0 + testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + }) + + It("should support file as subpath", func() { + // Create the file in the init container + setInitCommand(input.pod, fmt.Sprintf("echo %s > %s", input.f.Namespace.Name, input.subPathDir)) 
+ + TestBasicSubpath(input.f, input.f.Namespace.Name, input.pod) + }) + + It("should fail if subpath directory is outside the volume [Slow]", func() { + // Create the subpath outside the volume + setInitCommand(input.pod, fmt.Sprintf("ln -s /bin %s", input.subPathDir)) + + // Pod should fail + testPodFailSubpath(input.f, input.pod) + }) + + It("should fail if subpath file is outside the volume [Slow]", func() { + // Create the subpath outside the volume + setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/sh %s", input.subPathDir)) + + // Pod should fail + testPodFailSubpath(input.f, input.pod) + }) + + It("should fail if non-existent subpath is outside the volume [Slow]", func() { + // Create the subpath outside the volume + setInitCommand(input.pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", input.subPathDir)) + + // Pod should fail + testPodFailSubpath(input.f, input.pod) + }) + + It("should fail if subpath with backstepping is outside the volume [Slow]", func() { + // Create the subpath outside the volume + setInitCommand(input.pod, fmt.Sprintf("ln -s ../ %s", input.subPathDir)) + + // Pod should fail + testPodFailSubpath(input.f, input.pod) + }) + + It("should support creating multiple subpath from same volumes [Slow]", func() { + subpathDir1 := filepath.Join(volumePath, "subpath1") + subpathDir2 := filepath.Join(volumePath, "subpath2") + filepath1 := filepath.Join("/test-subpath1", fileName) + filepath2 := filepath.Join("/test-subpath2", fileName) + setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2)) + + addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{ + Name: volumeName, + MountPath: "/test-subpath1", + SubPath: "subpath1", + }) + addSubpathVolumeContainer(&input.pod.Spec.Containers[0], v1.VolumeMount{ + Name: volumeName, + MountPath: "/test-subpath2", + SubPath: "subpath2", + }) + + addMultipleWrites(&input.pod.Spec.Containers[0], filepath1, filepath2) + testMultipleReads(input.f, input.pod, 0, filepath1, filepath2) + }) + + It("should support restarting containers using directory as subpath [Slow]", func() { + // Create the directory + setInitCommand(input.pod, fmt.Sprintf("mkdir -p %v; touch %v", input.subPathDir, probeFilePath)) + + testPodContainerRestart(input.f, input.pod) + }) + + It("should support restarting containers using file as subpath [Slow]", func() { + // Create the file + setInitCommand(input.pod, fmt.Sprintf("touch %v; touch %v", input.subPathDir, probeFilePath)) + + testPodContainerRestart(input.f, input.pod) + }) + + It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() { + testSubpathReconstruction(input.f, input.pod, false) + }) + + It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() { + if input.volType == "hostPath" || input.volType == "hostPathSymlink" { + framework.Skipf("%s volume type does not support reconstruction, skipping", input.volType) + } + testSubpathReconstruction(input.f, input.pod, true) + }) + + It("should support readOnly directory specified in the volumeMount", func() { + // Create the directory + setInitCommand(input.pod, fmt.Sprintf("mkdir -p %s", input.subPathDir)) + + // Write the file in the volume from container 1 + setWriteCommand(input.filePathInVolume, &input.pod.Spec.Containers[1]) + + // Read it from inside the subPath from container 0 + input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true + testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + }) + + 
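The cases above all rely on the same path layout: container 1 mounts the whole volume at /test-volume, while container 0 mounts only the namespace-named subPath at that same mount point, so one file has two views. A small standalone sketch of that mapping follows; the namespace value is a placeholder for the name that createSubPathTestInput actually uses.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	volumePath := "/test-volume"         // mount path used by both containers
	subPath := "e2e-tests-subpath-abcde" // placeholder for the test namespace, used as the subPath
	fileName := "test-file"

	subPathDir := filepath.Join(volumePath, subPath)

	// The same file therefore has two views:
	fmt.Println("container 0 (subPath mount):", filepath.Join(volumePath, fileName))
	fmt.Println("container 1 (full volume):  ", filepath.Join(subPathDir, fileName))
}
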
It("should support readOnly file specified in the volumeMount", func() { + // Create the file + setInitCommand(input.pod, fmt.Sprintf("touch %s", input.subPathDir)) + + // Write the file in the volume from container 1 + setWriteCommand(input.subPathDir, &input.pod.Spec.Containers[1]) + + // Read it from inside the subPath from container 0 + input.pod.Spec.Containers[0].VolumeMounts[0].ReadOnly = true + testReadFile(input.f, volumePath, input.pod, 0) + }) + + It("should support existing directories when readOnly specified in the volumeSource", func() { + if input.roVol == nil { + framework.Skipf("Volume type %v doesn't support readOnly source", input.volType) + } + + // Initialize content in the volume while it's writable + initVolumeContent(input.f, input.pod, input.filePathInVolume, input.filePathInSubpath) + + // Set volume source to read only + input.pod.Spec.Volumes[0].VolumeSource = *input.roVol + + // Read it from inside the subPath from container 0 + testReadFile(input.f, input.filePathInSubpath, input.pod, 0) + }) + + It("should fail for new directories when readOnly specified in the volumeSource", func() { + if input.roVol == nil { + framework.Skipf("Volume type %v doesn't support readOnly source", input.volType) + } + + // Format the volume while it's writable + formatVolume(input.f, input.volSource) + + // Set volume source to read only + input.pod.Spec.Volumes[0].VolumeSource = *input.roVol + // Pod should fail + testPodFailSubpathError(input.f, input.pod, "") + }) + + // TODO: add a test case for the same disk with two partitions +} + +// TestBasicSubpath runs basic subpath test +func TestBasicSubpath(f *framework.Framework, contents string, pod *v1.Pod) { + TestBasicSubpathFile(f, contents, pod, volumePath) +} + +// TestBasicSubpathFile runs basic subpath file test +func TestBasicSubpathFile(f *framework.Framework, contents string, pod *v1.Pod, filepath string) { + setReadCommand(filepath, &pod.Spec.Containers[0]) + + By(fmt.Sprintf("Creating pod %s", pod.Name)) + f.TestContainerOutput("atomic-volume-subpath", pod, 0, []string{contents}) + + By(fmt.Sprintf("Deleting pod %s", pod.Name)) + err := framework.DeletePodWithWait(f, f.ClientSet, pod) + Expect(err).NotTo(HaveOccurred(), "while deleting pod") +} + +// TestPodSubpath runs pod subpath test +func TestPodSubpath(f *framework.Framework, subpath, volumeType string, source *v1.VolumeSource, privilegedSecurityContext bool) *v1.Pod { + var ( + suffix = strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4))) + gracePeriod = int64(1) + probeVolumeName = "liveness-probe-volume" + ) + return &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("pod-subpath-test-%s", suffix), + Namespace: f.Namespace.Name, + }, + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + { + Name: fmt.Sprintf("init-volume-%s", suffix), + Image: imageutils.GetE2EImage(imageutils.BusyBox), + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumePath, + }, + { + Name: probeVolumeName, + MountPath: probeVolumePath, + }, + }, + SecurityContext: &v1.SecurityContext{ + Privileged: &privilegedSecurityContext, + }, + }, + }, + Containers: []v1.Container{ + { + Name: fmt.Sprintf("test-container-subpath-%s", suffix), + Image: mountImage, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumePath, + SubPath: subpath, + }, + { + Name: probeVolumeName, + MountPath: probeVolumePath, + }, + }, + SecurityContext: &v1.SecurityContext{ + Privileged: &privilegedSecurityContext, + }, + }, + { + Name: 
fmt.Sprintf("test-container-volume-%s", suffix), + Image: mountImage, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: volumePath, + }, + { + Name: probeVolumeName, + MountPath: probeVolumePath, + }, + }, + SecurityContext: &v1.SecurityContext{ + Privileged: &privilegedSecurityContext, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + TerminationGracePeriodSeconds: &gracePeriod, + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: *source, + }, + { + Name: probeVolumeName, + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + }, + }, + SecurityContext: &v1.PodSecurityContext{ + SELinuxOptions: &v1.SELinuxOptions{ + Level: "s0:c0,c1", + }, + }, + }, + } +} + +func clearSubpathPodCommands(pod *v1.Pod) { + pod.Spec.InitContainers[0].Command = nil + pod.Spec.Containers[0].Args = nil + pod.Spec.Containers[1].Args = nil +} + +func setInitCommand(pod *v1.Pod, command string) { + pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command} +} + +func setWriteCommand(file string, container *v1.Container) { + container.Args = []string{ + fmt.Sprintf("--new_file_0644=%v", file), + fmt.Sprintf("--file_mode=%v", file), + } +} + +func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) { + existingMounts := container.VolumeMounts + container.VolumeMounts = append(existingMounts, volumeMount) +} + +func addMultipleWrites(container *v1.Container, file1 string, file2 string) { + container.Args = []string{ + fmt.Sprintf("--new_file_0644=%v", file1), + fmt.Sprintf("--new_file_0666=%v", file2), + } +} + +func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) { + By(fmt.Sprintf("Creating pod %s", pod.Name)) + f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{ + "content of file \"" + file1 + "\": mount-tester new file", + "content of file \"" + file2 + "\": mount-tester new file", + }) +} + +func setReadCommand(file string, container *v1.Container) { + container.Args = []string{ + fmt.Sprintf("--file_content_in_loop=%v", file), + fmt.Sprintf("--retry_time=%d", retryDuration), + } +} + +func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) { + setReadCommand(file, &pod.Spec.Containers[containerIndex]) + + By(fmt.Sprintf("Creating pod %s", pod.Name)) + f.TestContainerOutput("subpath", pod, containerIndex, []string{ + "content of file \"" + file + "\": mount-tester new file", + }) + + By(fmt.Sprintf("Deleting pod %s", pod.Name)) + err := framework.DeletePodWithWait(f, f.ClientSet, pod) + Expect(err).NotTo(HaveOccurred(), "while deleting pod") +} + +func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) { + testPodFailSubpathError(f, pod, "subPath") +} + +func testPodFailSubpathError(f *framework.Framework, pod *v1.Pod, errorMsg string) { + By(fmt.Sprintf("Creating pod %s", pod.Name)) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + Expect(err).ToNot(HaveOccurred(), "while creating pod") + defer func() { + framework.DeletePodWithWait(f, f.ClientSet, pod) + }() + err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) + Expect(err).To(HaveOccurred(), "while waiting for pod to be running") + + By("Checking for subpath error event") + selector := fields.Set{ + "involvedObject.kind": "Pod", + "involvedObject.name": pod.Name, + "involvedObject.namespace": f.Namespace.Name, + "reason": "Failed", + }.AsSelector().String() + options := metav1.ListOptions{FieldSelector: selector} 
+ events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options) + Expect(err).NotTo(HaveOccurred(), "while getting pod events") + Expect(len(events.Items)).NotTo(Equal(0), "no events found") + Expect(events.Items[0].Message).To(ContainSubstring(errorMsg), fmt.Sprintf("%q error not found", errorMsg)) +} + +// Tests that the existing subpath mount is detected when a container restarts +func testPodContainerRestart(f *framework.Framework, pod *v1.Pod) { + pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure + + pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox) + pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) + pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + + // Add liveness probe to subpath container + pod.Spec.Containers[0].LivenessProbe = &v1.Probe{ + Handler: v1.Handler{ + Exec: &v1.ExecAction{ + Command: []string{"cat", probeFilePath}, + }, + }, + InitialDelaySeconds: 1, + FailureThreshold: 1, + PeriodSeconds: 2, + } + + // Start pod + By(fmt.Sprintf("Creating pod %s", pod.Name)) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + Expect(err).ToNot(HaveOccurred(), "while creating pod") + + err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) + Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") + + By("Failing liveness probe") + out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", probeFilePath)) + framework.Logf("Pod exec output: %v", out) + Expect(err).ToNot(HaveOccurred(), "while failing liveness probe") + + // Check that container has restarted + By("Waiting for container to restart") + restarts := int32(0) + err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) { + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, status := range pod.Status.ContainerStatuses { + if status.Name == pod.Spec.Containers[0].Name { + framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount) + restarts = status.RestartCount + if restarts > 0 { + framework.Logf("Container has restart count: %v", restarts) + return true, nil + } + } + } + return false, nil + }) + Expect(err).ToNot(HaveOccurred(), "while waiting for container to restart") + + // Fix liveness probe + By("Rewriting the file") + writeCmd := fmt.Sprintf("echo test-after > %v", probeFilePath) + out, err = podContainerExec(pod, 1, writeCmd) + framework.Logf("Pod exec output: %v", out) + Expect(err).ToNot(HaveOccurred(), "while rewriting the probe file") + + // Wait for container restarts to stabilize + By("Waiting for container to stop restarting") + stableCount := int(0) + stableThreshold := int(time.Minute / framework.Poll) + err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) { + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + for _, status := range pod.Status.ContainerStatuses { + if status.Name == pod.Spec.Containers[0].Name { + if status.RestartCount == restarts { + stableCount++ + if stableCount > stableThreshold { + framework.Logf("Container restart has stabilized") + return true, nil + } + } else { + restarts = status.RestartCount + stableCount = 0 + framework.Logf("Container has restart count: %v", restarts) + } + break + } + } + return false, nil + }) + 
Expect(err).ToNot(HaveOccurred(), "while waiting for container to stabilize") +} + +func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) { + // This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption() + + // Change to busybox + pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.BusyBox) + pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + pod.Spec.Containers[1].Image = imageutils.GetE2EImage(imageutils.BusyBox) + pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"} + + // If grace period is too short, then there is not enough time for the volume + // manager to cleanup the volumes + gracePeriod := int64(30) + pod.Spec.TerminationGracePeriodSeconds = &gracePeriod + + By(fmt.Sprintf("Creating pod %s", pod.Name)) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + Expect(err).ToNot(HaveOccurred(), "while creating pod") + + err = framework.WaitForPodRunningInNamespace(f.ClientSet, pod) + Expect(err).ToNot(HaveOccurred(), "while waiting for pod to be running") + + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred(), "while getting pod") + + utils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true) +} + +func formatVolume(f *framework.Framework, volumeSource *v1.VolumeSource) { + var err error + // Launch pod to format the volume + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("volume-prep-%s", f.Namespace.Name), + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name), + Image: imageutils.GetE2EImage(imageutils.BusyBox), + Command: []string{"/bin/sh", "-ec", "echo nothing"}, + VolumeMounts: []v1.VolumeMount{ + { + Name: volumeName, + MountPath: "/vol", + }, + }, + }, + }, + RestartPolicy: v1.RestartPolicyNever, + Volumes: []v1.Volume{ + { + Name: volumeName, + VolumeSource: *volumeSource, + }, + }, + }, + } + By(fmt.Sprintf("Creating pod to format volume %s", pod.Name)) + pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod) + Expect(err).ToNot(HaveOccurred(), "while creating volume init pod") + + err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace) + Expect(err).ToNot(HaveOccurred(), "while waiting for volume init pod to succeed") + + err = framework.DeletePodWithWait(f, f.ClientSet, pod) + Expect(err).ToNot(HaveOccurred(), "while deleting volume init pod") +} + +func initVolumeContent(f *framework.Framework, pod *v1.Pod, volumeFilepath, subpathFilepath string) { + setWriteCommand(volumeFilepath, &pod.Spec.Containers[1]) + setReadCommand(subpathFilepath, &pod.Spec.Containers[0]) + + By(fmt.Sprintf("Creating pod to write volume content %s", pod.Name)) + f.TestContainerOutput("subpath", pod, 0, []string{ + "content of file \"" + subpathFilepath + "\": mount-tester new file", + }) + + By(fmt.Sprintf("Deleting pod %s", pod.Name)) + err := framework.DeletePodWithWait(f, f.ClientSet, pod) + Expect(err).NotTo(HaveOccurred(), "while deleting pod") + + // This pod spec is going to be reused; reset all the commands + clearSubpathPodCommands(pod) +} + +func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) { + return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec) +} diff --git 
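One detail of testPodContainerRestart above that is easy to misread is the stabilization loop: it only succeeds once the restart count has stayed unchanged for slightly more than a minute of consecutive polls. The arithmetic is sketched below, assuming framework.Poll is the framework's usual 2-second interval (an assumption, not something defined in this diff).

package main

import (
	"fmt"
	"time"
)

func main() {
	poll := 2 * time.Second // assumed value of framework.Poll
	stableThreshold := int(time.Minute / poll)

	// The loop bumps stableCount once per quiet poll and returns true only when
	// stableCount > stableThreshold, i.e. after stableThreshold+1 quiet polls in a row.
	quietPolls := stableThreshold + 1
	fmt.Printf("stableThreshold=%d polls, stability required ~%v (within the 2m PollImmediate budget)\n",
		stableThreshold, time.Duration(quietPolls)*poll)
}
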
a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go new file mode 100644 index 00000000000..497151cbb6d --- /dev/null +++ b/test/e2e/storage/testsuites/volume_io.go @@ -0,0 +1,359 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* + * This test checks that the plugin VolumeSources are working when pseudo-streaming + * various write sizes to mounted files. + */ + +package testsuites + +import ( + "fmt" + "math" + "path" + "strconv" + "strings" + "time" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + "k8s.io/kubernetes/test/e2e/storage/testpatterns" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +// MD5 hashes of the test file corresponding to each file size. +// Test files are generated in testVolumeIO() +// If test file generation algorithm changes, these must be recomputed. +var md5hashes = map[int64]string{ + testpatterns.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710", + testpatterns.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104", + testpatterns.FileSizeLarge: "8d763edc71bd16217664793b5a15e403", +} + +type volumeIOTestSuite struct { + tsInfo TestSuiteInfo +} + +var _ TestSuite = &volumeIOTestSuite{} + +// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface +func InitVolumeIOTestSuite() TestSuite { + return &volumeIOTestSuite{ + tsInfo: TestSuiteInfo{ + name: "volumeIO", + testPatterns: []testpatterns.TestPattern{ + testpatterns.DefaultFsInlineVolume, + testpatterns.DefaultFsPreprovisionedPV, + testpatterns.DefaultFsDynamicPV, + }, + }, + } +} + +func (t *volumeIOTestSuite) getTestSuiteInfo() TestSuiteInfo { + return t.tsInfo +} + +func (t *volumeIOTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) { +} + +func createVolumeIOTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumeIOTestInput { + var fsGroup *int64 + driver := resource.driver + dInfo := driver.GetDriverInfo() + f := dInfo.Framework + fileSizes := createFileSizes(dInfo.MaxFileSize) + volSource := resource.volSource + + if volSource == nil { + framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name) + } + + if dInfo.IsFsGroupSupported { + fsGroupVal := int64(1234) + fsGroup = &fsGroupVal + } + + return volumeIOTestInput{ + f: f, + name: dInfo.Name, + config: dInfo.Config, + volSource: *volSource, + testFile: fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name), + podSec: v1.PodSecurityContext{ + FSGroup: fsGroup, + }, + fileSizes: fileSizes, + } +} + +func (t *volumeIOTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + Context(getTestNameStr(t, pattern), func() { + var ( + resource genericVolumeTestResource + input volumeIOTestInput + needsCleanup bool + ) + + BeforeEach(func() 
{ + needsCleanup = false + // Skip unsupported tests to avoid unnecessary resource initialization + skipUnsupportedTest(t, driver, pattern) + needsCleanup = true + + // Setup test resource for driver and testpattern + resource := genericVolumeTestResource{} + resource.setupResource(driver, pattern) + + // Create test input + input = createVolumeIOTestInput(pattern, resource) + }) + + AfterEach(func() { + if needsCleanup { + resource.cleanupResource(driver, pattern) + } + }) + + execTestVolumeIO(&input) + }) +} + +type volumeIOTestInput struct { + f *framework.Framework + name string + config framework.VolumeTestConfig + volSource v1.VolumeSource + testFile string + podSec v1.PodSecurityContext + fileSizes []int64 +} + +func execTestVolumeIO(input *volumeIOTestInput) { + It("should write files of various sizes, verify size, validate content [Slow]", func() { + f := input.f + cs := f.ClientSet + + err := testVolumeIO(f, cs, input.config, input.volSource, &input.podSec, input.testFile, input.fileSizes) + Expect(err).NotTo(HaveOccurred()) + }) +} + +func createFileSizes(maxFileSize int64) []int64 { + allFileSizes := []int64{ + testpatterns.FileSizeSmall, + testpatterns.FileSizeMedium, + testpatterns.FileSizeLarge, + } + fileSizes := []int64{} + + for _, size := range allFileSizes { + if size <= maxFileSize { + fileSizes = append(fileSizes, size) + } + } + + return fileSizes +} + +// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env. +func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { + volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume") + + var gracePeriod int64 = 1 + return &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: config.Prefix + "-io-client", + Labels: map[string]string{ + "role": config.Prefix + "-io-client", + }, + }, + Spec: v1.PodSpec{ + InitContainers: []v1.Container{ + { + Name: config.Prefix + "-io-init", + Image: framework.BusyBoxImage, + Command: []string{ + "/bin/sh", + "-c", + initCmd, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: volName, + MountPath: dir, + }, + }, + }, + }, + Containers: []v1.Container{ + { + Name: config.Prefix + "-io-client", + Image: framework.BusyBoxImage, + Command: []string{ + "/bin/sh", + "-c", + "sleep 3600", // keep pod alive until explicitly deleted + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: volName, + MountPath: dir, + }, + }, + }, + }, + TerminationGracePeriodSeconds: &gracePeriod, + SecurityContext: podSecContext, + Volumes: []v1.Volume{ + { + Name: volName, + VolumeSource: volsrc, + }, + }, + RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails + NodeName: config.ClientNodeName, + NodeSelector: config.NodeSelector, + }, + } +} + +// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file. +func writeToFile(pod *v1.Pod, fpath, ddInput string, fsize int64) error { + By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath)) + loopCnt := fsize / testpatterns.MinFileSize + writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath) + _, err := utils.PodExec(pod, writeCmd) + + return err +} + +// Verify that the test file is the expected size and contains the expected content. 
+func verifyFile(pod *v1.Pod, fpath string, expectSize int64, ddInput string) error { + By("verifying file size") + rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath)) + if err != nil || rtnstr == "" { + return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err) + } + size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n")) + if err != nil { + return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err) + } + if int64(size) != expectSize { + return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize) + } + + By("verifying file hash") + rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) + if err != nil { + return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err) + } + actualHash := strings.TrimSuffix(rtnstr, "\n") + expectedHash, ok := md5hashes[expectSize] + if !ok { + return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?", + expectSize) + } + if actualHash != expectedHash { + return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`", + fpath, expectSize, expectedHash, actualHash) + } + + return nil +} + +// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored. +func deleteFile(pod *v1.Pod, fpath string) { + By(fmt.Sprintf("deleting test file %s...", fpath)) + _, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath)) + if err != nil { + // keep going, the test dir will be deleted when the volume is unmounted + framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err) + } +} + +// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the +// client pod and the new files when done. +// Note: the file name is appended to "/opt//", eg. "/opt/nfs/e2e-.../". +// Note: nil can be passed for the podSecContext parm, in which case it is ignored. +// Note: `fsizes` values are enforced to each be at least `MinFileSize` and a multiple of `MinFileSize` +// bytes. +func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { + dir := path.Join("/opt", config.Prefix, config.Namespace) + ddInput := path.Join(dir, "dd_if") + writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value + loopCnt := testpatterns.MinFileSize / int64(len(writeBlk)) + // initContainer cmd to create and fill dd's input file. The initContainer is used to create + // the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is + // used to create a 1MiB file in the target directory. 
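Since the md5hashes table near the top of this file must be recomputed whenever the generation scheme changes, a small offline sketch like the one below can reproduce the byte stream that the init container and the dd loop build (the 32-byte alphabet block repeated up to the target size) and print the hashes. It is not part of the suite, and it assumes the generated files contain exactly that repeated pattern with nothing appended; treat its output as a starting point to verify, not authoritative values.

package main

import (
	"crypto/md5"
	"fmt"
	"strings"
)

func main() {
	const miB = int64(1024 * 1024)
	// Same 1 KiB block that testVolumeIO writes into the dd input file.
	block := []byte(strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32))

	for _, size := range []int64{1 * miB, 100 * miB, 1024 * miB} { // small, medium, large
		h := md5.New()
		for written := int64(0); written < size; written += int64(len(block)) {
			h.Write(block)
		}
		fmt.Printf("%4d MiB: %x\n", size/miB, h.Sum(nil))
	}
}
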
+ initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, ddInput) + + clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext) + + By(fmt.Sprintf("starting %s", clientPod.Name)) + podsNamespacer := cs.CoreV1().Pods(config.Namespace) + clientPod, err = podsNamespacer.Create(clientPod) + if err != nil { + return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) + } + defer func() { + // note the test dir will be removed when the kubelet unmounts it + By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) + e := framework.DeletePodWithWait(f, cs, clientPod) + if e != nil { + framework.Logf("client pod failed to delete: %v", e) + if err == nil { // delete err is returned if err is not set + err = e + } + } else { + framework.Logf("sleeping a bit so kubelet can unmount and detach the volume") + time.Sleep(framework.PodCleanupTimeout) + } + }() + + err = framework.WaitForPodRunningInNamespace(cs, clientPod) + if err != nil { + return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err) + } + + // create files of the passed-in file sizes and verify test file size and content + for _, fsize := range fsizes { + // file sizes must be a multiple of `MinFileSize` + if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 { + fsize = fsize/testpatterns.MinFileSize + testpatterns.MinFileSize + } + fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize)) + if err = writeToFile(clientPod, fpath, ddInput, fsize); err != nil { + return err + } + if err = verifyFile(clientPod, fpath, fsize, ddInput); err != nil { + return err + } + deleteFile(clientPod, fpath) + } + + return +} diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go new file mode 100644 index 00000000000..e8f740ea3af --- /dev/null +++ b/test/e2e/storage/testsuites/volumemode.go @@ -0,0 +1,443 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testsuites + +import ( + "fmt" + + . "github.com/onsi/ginkgo" + . 
"github.com/onsi/gomega" + + "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/kubernetes/test/e2e/framework" + "k8s.io/kubernetes/test/e2e/storage/drivers" + "k8s.io/kubernetes/test/e2e/storage/testpatterns" + "k8s.io/kubernetes/test/e2e/storage/utils" +) + +const ( + noProvisioner = "kubernetes.io/no-provisioner" + pvNamePrefix = "pv" +) + +type volumeModeTestSuite struct { + tsInfo TestSuiteInfo +} + +var _ TestSuite = &volumeModeTestSuite{} + +// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface +func InitVolumeModeTestSuite() TestSuite { + return &volumeModeTestSuite{ + tsInfo: TestSuiteInfo{ + name: "volumeMode", + featureTag: "[Feature:BlockVolume]", + testPatterns: []testpatterns.TestPattern{ + testpatterns.FsVolModePreprovisionedPV, + testpatterns.FsVolModeDynamicPV, + testpatterns.BlockVolModePreprovisionedPV, + testpatterns.BlockVolModeDynamicPV, + }, + }, + } +} + +func (t *volumeModeTestSuite) getTestSuiteInfo() TestSuiteInfo { + return t.tsInfo +} + +func (t *volumeModeTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) { +} + +func createVolumeModeTestInput(pattern testpatterns.TestPattern, resource volumeModeTestResource) volumeModeTestInput { + driver := resource.driver + dInfo := driver.GetDriverInfo() + f := dInfo.Framework + + return volumeModeTestInput{ + f: f, + sc: resource.sc, + pvc: resource.pvc, + pv: resource.pv, + testVolType: pattern.VolType, + nodeName: dInfo.Config.ClientNodeName, + volMode: pattern.VolMode, + isBlockSupported: dInfo.IsBlockSupported, + } +} + +func getVolumeModeTestFunc(pattern testpatterns.TestPattern, driver drivers.TestDriver) func(*volumeModeTestInput) { + dInfo := driver.GetDriverInfo() + isBlockSupported := dInfo.IsBlockSupported + volMode := pattern.VolMode + volType := pattern.VolType + + switch volType { + case testpatterns.PreprovisionedPV: + if volMode == v1.PersistentVolumeBlock && !isBlockSupported { + return testVolumeModeFailForPreprovisionedPV + } + return testVolumeModeSuccessForPreprovisionedPV + case testpatterns.DynamicPV: + if volMode == v1.PersistentVolumeBlock && !isBlockSupported { + return testVolumeModeFailForDynamicPV + } + return testVolumeModeSuccessForDynamicPV + default: + framework.Failf("Volume mode test doesn't support volType: %v", volType) + } + return nil +} + +func (t *volumeModeTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + Context(getTestNameStr(t, pattern), func() { + var ( + resource volumeModeTestResource + input volumeModeTestInput + testFunc func(*volumeModeTestInput) + needsCleanup bool + ) + + testFunc = getVolumeModeTestFunc(pattern, driver) + + BeforeEach(func() { + needsCleanup = false + // Skip unsupported tests to avoid unnecessary resource initialization + skipUnsupportedTest(t, driver, pattern) + needsCleanup = true + + // Setup test resource for driver and testpattern + resource := volumeModeTestResource{} + resource.setupResource(driver, pattern) + + // Create test input + input = createVolumeModeTestInput(pattern, resource) + }) + + AfterEach(func() { + if needsCleanup { + resource.cleanupResource(driver, pattern) + } + }) + + testFunc(&input) + }) +} + +type volumeModeTestResource struct { + driver drivers.TestDriver + + sc *storagev1.StorageClass + pvc *v1.PersistentVolumeClaim + pv *v1.PersistentVolume +} + +var _ TestResource = &volumeModeTestResource{} 
+ +func (s *volumeModeTestResource) setupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + s.driver = driver + dInfo := driver.GetDriverInfo() + f := dInfo.Framework + ns := f.Namespace + fsType := pattern.FsType + volBindMode := storagev1.VolumeBindingImmediate + volMode := pattern.VolMode + volType := pattern.VolType + + var ( + scName string + pvSource *v1.PersistentVolumeSource + ) + + // Create volume for pre-provisioned volume tests + drivers.CreateVolume(driver, volType) + + switch volType { + case testpatterns.PreprovisionedPV: + if volMode == v1.PersistentVolumeBlock { + scName = fmt.Sprintf("%s-%s-sc-for-block", ns.Name, dInfo.Name) + } else if volMode == v1.PersistentVolumeFilesystem { + scName = fmt.Sprintf("%s-%s-sc-for-file", ns.Name, dInfo.Name) + } + if pDriver, ok := driver.(drivers.PreprovisionedPVTestDriver); ok { + pvSource = pDriver.GetPersistentVolumeSource(false, fsType) + if pvSource == nil { + framework.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name) + } + + sc, pvConfig, pvcConfig := generateConfigsForPreprovisionedPVTest(scName, volBindMode, volMode, *pvSource) + s.sc = sc + s.pv = framework.MakePersistentVolume(pvConfig) + s.pvc = framework.MakePersistentVolumeClaim(pvcConfig, ns.Name) + } + case testpatterns.DynamicPV: + if dDriver, ok := driver.(drivers.DynamicPVTestDriver); ok { + s.sc = dDriver.GetDynamicProvisionStorageClass(fsType) + if s.sc == nil { + framework.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) + } + s.sc.VolumeBindingMode = &volBindMode + + claimSize := "2Gi" + s.pvc = getClaim(claimSize, ns.Name) + s.pvc.Spec.StorageClassName = &s.sc.Name + s.pvc.Spec.VolumeMode = &volMode + } + default: + framework.Failf("Volume mode test doesn't support: %s", volType) + } +} + +func (s *volumeModeTestResource) cleanupResource(driver drivers.TestDriver, pattern testpatterns.TestPattern) { + dInfo := driver.GetDriverInfo() + f := dInfo.Framework + cs := f.ClientSet + ns := f.Namespace + volType := pattern.VolType + + By("Deleting pv and pvc") + errs := framework.PVPVCCleanup(cs, ns.Name, s.pv, s.pvc) + if len(errs) > 0 { + framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) + } + By("Deleting sc") + if s.sc != nil { + deleteStorageClass(cs, s.sc.Name) + } + + // Cleanup volume for pre-provisioned volume tests + drivers.DeleteVolume(driver, volType) +} + +type volumeModeTestInput struct { + f *framework.Framework + sc *storagev1.StorageClass + pvc *v1.PersistentVolumeClaim + pv *v1.PersistentVolume + testVolType testpatterns.TestVolType + nodeName string + volMode v1.PersistentVolumeMode + isBlockSupported bool +} + +func testVolumeModeFailForPreprovisionedPV(input *volumeModeTestInput) { + It("should fail to create pod by failing to mount volume", func() { + f := input.f + cs := f.ClientSet + ns := f.Namespace + var err error + + By("Creating sc") + input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv) + Expect(err).NotTo(HaveOccurred()) + + // Prebind pv + input.pvc.Spec.VolumeName = input.pv.Name + input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) + Expect(err).NotTo(HaveOccurred()) + + framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc)) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, 
[]*v1.PersistentVolumeClaim{input.pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, input.nodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) + }() + Expect(err).To(HaveOccurred()) + }) +} + +func testVolumeModeSuccessForPreprovisionedPV(input *volumeModeTestInput) { + It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + f := input.f + cs := f.ClientSet + ns := f.Namespace + var err error + + By("Creating sc") + input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + input.pv, err = cs.CoreV1().PersistentVolumes().Create(input.pv) + Expect(err).NotTo(HaveOccurred()) + + // Prebind pv + input.pvc.Spec.VolumeName = input.pv.Name + input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) + Expect(err).NotTo(HaveOccurred()) + + framework.ExpectNoError(framework.WaitOnPVandPVC(cs, ns.Name, input.pv, input.pvc)) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, + false, "", false, false, framework.SELinuxLabel, + nil, input.nodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) + }() + Expect(err).NotTo(HaveOccurred()) + + By("Checking if persistent volume exists as expected volume mode") + checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") + + By("Checking if read/write to persistent volume works properly") + checkReadWriteToPath(pod, input.volMode, "/mnt/volume1") + }) + // TODO(mkimuram): Add more tests +} + +func testVolumeModeFailForDynamicPV(input *volumeModeTestInput) { + It("should fail in binding dynamic provisioned PV to PVC", func() { + f := input.f + cs := f.ClientSet + ns := f.Namespace + var err error + + By("Creating sc") + input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).To(HaveOccurred()) + }) +} + +func testVolumeModeSuccessForDynamicPV(input *volumeModeTestInput) { + It("should create sc, pod, pv, and pvc, read/write to the pv, and delete all created resources", func() { + f := input.f + cs := f.ClientSet + ns := f.Namespace + var err error + + By("Creating sc") + input.sc, err = cs.StorageV1().StorageClasses().Create(input.sc) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pv and pvc") + input.pvc, err = cs.CoreV1().PersistentVolumeClaims(ns.Name).Create(input.pvc) + Expect(err).NotTo(HaveOccurred()) + + err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, input.pvc.Namespace, input.pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) + Expect(err).NotTo(HaveOccurred()) + + input.pvc, err = cs.CoreV1().PersistentVolumeClaims(input.pvc.Namespace).Get(input.pvc.Name, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + input.pv, err = cs.CoreV1().PersistentVolumes().Get(input.pvc.Spec.VolumeName, metav1.GetOptions{}) + Expect(err).NotTo(HaveOccurred()) + + By("Creating pod") + pod, err := framework.CreateSecPodWithNodeName(cs, ns.Name, []*v1.PersistentVolumeClaim{input.pvc}, + false, "", false, 
false, framework.SELinuxLabel, + nil, input.nodeName, framework.PodStartTimeout) + defer func() { + framework.ExpectNoError(framework.DeletePodWithWait(f, cs, pod)) + }() + Expect(err).NotTo(HaveOccurred()) + + By("Checking if persistent volume exists as expected volume mode") + checkVolumeModeOfPath(pod, input.volMode, "/mnt/volume1") + + By("Checking if read/write to persistent volume works properly") + checkReadWriteToPath(pod, input.volMode, "/mnt/volume1") + }) + // TODO(mkimuram): Add more tests +} + +func generateConfigsForPreprovisionedPVTest(scName string, volBindMode storagev1.VolumeBindingMode, + volMode v1.PersistentVolumeMode, pvSource v1.PersistentVolumeSource) (*storagev1.StorageClass, + framework.PersistentVolumeConfig, framework.PersistentVolumeClaimConfig) { + // StorageClass + scConfig := &storagev1.StorageClass{ + ObjectMeta: metav1.ObjectMeta{ + Name: scName, + }, + Provisioner: noProvisioner, + VolumeBindingMode: &volBindMode, + } + // PV + pvConfig := framework.PersistentVolumeConfig{ + PVSource: pvSource, + NamePrefix: pvNamePrefix, + StorageClassName: scName, + VolumeMode: &volMode, + } + // PVC + pvcConfig := framework.PersistentVolumeClaimConfig{ + AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}, + StorageClassName: &scName, + VolumeMode: &volMode, + } + + return scConfig, pvConfig, pvcConfig +} + +func checkVolumeModeOfPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { + if volMode == v1.PersistentVolumeBlock { + // Check if block exists + utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -b %s", path)) + + // Double check that it's not directory + utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -d %s", path), 1) + } else { + // Check if directory exists + utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("test -d %s", path)) + + // Double check that it's not block + utils.VerifyExecInPodFail(pod, fmt.Sprintf("test -b %s", path), 1) + } +} + +func checkReadWriteToPath(pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) { + if volMode == v1.PersistentVolumeBlock { + // random -> file1 + utils.VerifyExecInPodSucceed(pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1") + // file1 -> dev (write to dev) + utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path)) + // dev -> file2 (read from dev) + utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path)) + // file1 == file2 (check contents) + utils.VerifyExecInPodSucceed(pod, "diff /tmp/file1 /tmp/file2") + // Clean up temp files + utils.VerifyExecInPodSucceed(pod, "rm -f /tmp/file1 /tmp/file2") + + // Check that writing file to block volume fails + utils.VerifyExecInPodFail(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1) + } else { + // text -> file1 (write to file) + utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path)) + // grep file1 (read from file and check contents) + utils.VerifyExecInPodSucceed(pod, fmt.Sprintf("grep 'Hello world.' %s/file1.txt", path)) + + // Check that writing to directory as block volume fails + utils.VerifyExecInPodFail(pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1) + } +} diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go new file mode 100644 index 00000000000..c7c65110804 --- /dev/null +++ b/test/e2e/storage/testsuites/volumes.go @@ -0,0 +1,160 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This test checks that various VolumeSources are working.
+
+// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this
+// test should be made there as well.
+
+package testsuites
+
+import (
+    "fmt"
+
+    . "github.com/onsi/ginkgo"
+    "k8s.io/kubernetes/test/e2e/framework"
+    "k8s.io/kubernetes/test/e2e/storage/drivers"
+    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
+)
+
+type volumesTestSuite struct {
+    tsInfo TestSuiteInfo
+}
+
+var _ TestSuite = &volumesTestSuite{}
+
+// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
+func InitVolumesTestSuite() TestSuite {
+    return &volumesTestSuite{
+        tsInfo: TestSuiteInfo{
+            name: "volumes",
+            testPatterns: []testpatterns.TestPattern{
+                // Default fsType
+                testpatterns.DefaultFsInlineVolume,
+                testpatterns.DefaultFsPreprovisionedPV,
+                testpatterns.DefaultFsDynamicPV,
+                // ext3
+                testpatterns.Ext3InlineVolume,
+                testpatterns.Ext3PreprovisionedPV,
+                testpatterns.Ext3DynamicPV,
+                // ext4
+                testpatterns.Ext4InlineVolume,
+                testpatterns.Ext4PreprovisionedPV,
+                testpatterns.Ext4DynamicPV,
+                // xfs
+                testpatterns.XfsInlineVolume,
+                testpatterns.XfsPreprovisionedPV,
+                testpatterns.XfsDynamicPV,
+            },
+        },
+    }
+}
+
+func (t *volumesTestSuite) getTestSuiteInfo() TestSuiteInfo {
+    return t.tsInfo
+}
+
+func (t *volumesTestSuite) skipUnsupportedTest(pattern testpatterns.TestPattern, driver drivers.TestDriver) {
+    dInfo := driver.GetDriverInfo()
+    if !dInfo.IsPersistent {
+        framework.Skipf("Driver %q does not provide persistency - skipping", dInfo.Name)
+    }
+}
+
+func createVolumesTestInput(pattern testpatterns.TestPattern, resource genericVolumeTestResource) volumesTestInput {
+    var fsGroup *int64
+    driver := resource.driver
+    dInfo := driver.GetDriverInfo()
+    f := dInfo.Framework
+    volSource := resource.volSource
+
+    if volSource == nil {
+        framework.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
+    }
+
+    if dInfo.IsFsGroupSupported {
+        fsGroupVal := int64(1234)
+        fsGroup = &fsGroupVal
+    }
+
+    return volumesTestInput{
+        f:       f,
+        name:    dInfo.Name,
+        config:  dInfo.Config,
+        fsGroup: fsGroup,
+        tests: []framework.VolumeTest{
+            {
+                Volume: *volSource,
+                File:   "index.html",
+                // Must match content
+                ExpectedContent: fmt.Sprintf("Hello from %s from namespace %s",
+                    dInfo.Name, f.Namespace.Name),
+            },
+        },
+    }
+}
+
+func (t *volumesTestSuite) execTest(driver drivers.TestDriver, pattern testpatterns.TestPattern) {
+    Context(getTestNameStr(t, pattern), func() {
+        var (
+            resource     genericVolumeTestResource
+            input        volumesTestInput
+            needsCleanup bool
+        )
+
+        BeforeEach(func() {
+            needsCleanup = false
+            // Skip unsupported tests to avoid unnecessary resource initialization
+            skipUnsupportedTest(t, driver, pattern)
+            needsCleanup = true
+
+            // Setup test resource for driver and testpattern.
+            // Assign to the outer variable (no ":=") so AfterEach cleans up the
+            // same resource that was set up here.
+            resource = genericVolumeTestResource{}
+            resource.setupResource(driver, pattern)
+
+            // Create test input
+            input = createVolumesTestInput(pattern,
resource) + }) + + AfterEach(func() { + if needsCleanup { + resource.cleanupResource(driver, pattern) + } + }) + + testVolumes(&input) + }) +} + +type volumesTestInput struct { + f *framework.Framework + name string + config framework.VolumeTestConfig + fsGroup *int64 + tests []framework.VolumeTest +} + +func testVolumes(input *volumesTestInput) { + It("should be mountable", func() { + f := input.f + cs := f.ClientSet + defer framework.VolumeTestCleanup(f, input.config) + + volumeTest := input.tests + framework.InjectHtml(cs, input.config, volumeTest[0].Volume, volumeTest[0].ExpectedContent) + framework.TestVolumeClient(cs, input.config, input.fsGroup, input.tests) + }) +} diff --git a/test/e2e/storage/volume_io.go b/test/e2e/storage/volume_io.go deleted file mode 100644 index 9ed5e245fff..00000000000 --- a/test/e2e/storage/volume_io.go +++ /dev/null @@ -1,434 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -/* - * This test checks that the plugin VolumeSources are working when pseudo-streaming - * various write sizes to mounted files. Note that the plugin is defined inline in - * the pod spec, not via a persistent volume and claim. - * - * These tests work only when privileged containers are allowed, exporting various - * filesystems (NFS, GlusterFS, ...) usually needs some mounting or other privileged - * magic in the server pod. Note that the server containers are for testing purposes - * only and should not be used in production. - */ - -package storage - -import ( - "fmt" - "math" - "path" - "strconv" - "strings" - "time" - - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/test/e2e/framework" - "k8s.io/kubernetes/test/e2e/storage/utils" -) - -const ( - minFileSize = 1 * framework.MiB - - fileSizeSmall = 1 * framework.MiB - fileSizeMedium = 100 * framework.MiB - fileSizeLarge = 1 * framework.GiB -) - -// MD5 hashes of the test file corresponding to each file size. -// Test files are generated in testVolumeIO() -// If test file generation algorithm changes, these must be recomputed. -var md5hashes = map[int64]string{ - fileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710", - fileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104", - fileSizeLarge: "8d763edc71bd16217664793b5a15e403", -} - -// Return the plugin's client pod spec. Use an InitContainer to setup the file i/o test env. 
-func makePodSpec(config framework.VolumeTestConfig, dir, initCmd string, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext) *v1.Pod { - volName := fmt.Sprintf("%s-%s", config.Prefix, "io-volume") - - var gracePeriod int64 = 1 - return &v1.Pod{ - TypeMeta: metav1.TypeMeta{ - Kind: "Pod", - APIVersion: "v1", - }, - ObjectMeta: metav1.ObjectMeta{ - Name: config.Prefix + "-io-client", - Labels: map[string]string{ - "role": config.Prefix + "-io-client", - }, - }, - Spec: v1.PodSpec{ - InitContainers: []v1.Container{ - { - Name: config.Prefix + "-io-init", - Image: framework.BusyBoxImage, - Command: []string{ - "/bin/sh", - "-c", - initCmd, - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: volName, - MountPath: dir, - }, - }, - }, - }, - Containers: []v1.Container{ - { - Name: config.Prefix + "-io-client", - Image: framework.BusyBoxImage, - Command: []string{ - "/bin/sh", - "-c", - "sleep 3600", // keep pod alive until explicitly deleted - }, - VolumeMounts: []v1.VolumeMount{ - { - Name: volName, - MountPath: dir, - }, - }, - }, - }, - TerminationGracePeriodSeconds: &gracePeriod, - SecurityContext: podSecContext, - Volumes: []v1.Volume{ - { - Name: volName, - VolumeSource: volsrc, - }, - }, - RestartPolicy: v1.RestartPolicyNever, // want pod to fail if init container fails - }, - } -} - -// Write `fsize` bytes to `fpath` in the pod, using dd and the `dd_input` file. -func writeToFile(pod *v1.Pod, fpath, dd_input string, fsize int64) error { - By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath)) - loopCnt := fsize / minFileSize - writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, dd_input, minFileSize, fpath) - _, err := utils.PodExec(pod, writeCmd) - - return err -} - -// Verify that the test file is the expected size and contains the expected content. -func verifyFile(pod *v1.Pod, fpath string, expectSize int64, dd_input string) error { - By("verifying file size") - rtnstr, err := utils.PodExec(pod, fmt.Sprintf("stat -c %%s %s", fpath)) - if err != nil || rtnstr == "" { - return fmt.Errorf("unable to get file size via `stat %s`: %v", fpath, err) - } - size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n")) - if err != nil { - return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err) - } - if int64(size) != expectSize { - return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize) - } - - By("verifying file hash") - rtnstr, err = utils.PodExec(pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath)) - if err != nil { - return fmt.Errorf("unable to test file hash via `md5sum %s`: %v", fpath, err) - } - actualHash := strings.TrimSuffix(rtnstr, "\n") - expectedHash, ok := md5hashes[expectSize] - if !ok { - return fmt.Errorf("File hash is unknown for file size %d. Was a new file size added to the test suite?", - expectSize) - } - if actualHash != expectedHash { - return fmt.Errorf("MD5 hash is incorrect for file %s with size %d. Expected: `%s`; Actual: `%s`", - fpath, expectSize, expectedHash, actualHash) - } - - return nil -} - -// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored. 
-func deleteFile(pod *v1.Pod, fpath string) { - By(fmt.Sprintf("deleting test file %s...", fpath)) - _, err := utils.PodExec(pod, fmt.Sprintf("rm -f %s", fpath)) - if err != nil { - // keep going, the test dir will be deleted when the volume is unmounted - framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test", fpath, err) - } -} - -// Create the client pod and create files of the sizes passed in by the `fsizes` parameter. Delete the -// client pod and the new files when done. -// Note: the file name is appended to "/opt//", eg. "/opt/nfs/e2e-.../". -// Note: nil can be passed for the podSecContext parm, in which case it is ignored. -// Note: `fsizes` values are enforced to each be at least `minFileSize` and a multiple of `minFileSize` -// bytes. -func testVolumeIO(f *framework.Framework, cs clientset.Interface, config framework.VolumeTestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) { - dir := path.Join("/opt", config.Prefix, config.Namespace) - dd_input := path.Join(dir, "dd_if") - writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value - loopCnt := minFileSize / int64(len(writeBlk)) - // initContainer cmd to create and fill dd's input file. The initContainer is used to create - // the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is - // used to create a 1MiB file in the target directory. - initCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo -n %s >>%s; let i+=1; done", loopCnt, writeBlk, dd_input) - - clientPod := makePodSpec(config, dir, initCmd, volsrc, podSecContext) - - By(fmt.Sprintf("starting %s", clientPod.Name)) - podsNamespacer := cs.CoreV1().Pods(config.Namespace) - clientPod, err = podsNamespacer.Create(clientPod) - if err != nil { - return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) - } - defer func() { - // note the test dir will be removed when the kubelet unmounts it - By(fmt.Sprintf("deleting client pod %q...", clientPod.Name)) - e := framework.DeletePodWithWait(f, cs, clientPod) - if e != nil { - framework.Logf("client pod failed to delete: %v", e) - if err == nil { // delete err is returned if err is not set - err = e - } - } else { - framework.Logf("sleeping a bit so kubelet can unmount and detach the volume") - time.Sleep(framework.PodCleanupTimeout) - } - }() - - err = framework.WaitForPodRunningInNamespace(cs, clientPod) - if err != nil { - return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err) - } - - // create files of the passed-in file sizes and verify test file size and content - for _, fsize := range fsizes { - // file sizes must be a multiple of `minFileSize` - if math.Mod(float64(fsize), float64(minFileSize)) != 0 { - fsize = fsize/minFileSize + minFileSize - } - fpath := path.Join(dir, fmt.Sprintf("%s-%d", file, fsize)) - if err = writeToFile(clientPod, fpath, dd_input, fsize); err != nil { - return err - } - if err = verifyFile(clientPod, fpath, fsize, dd_input); err != nil { - return err - } - deleteFile(clientPod, fpath) - } - - return -} - -// These tests need privileged containers which are disabled by default. 
-// TODO: support all of the plugins tested in storage/volumes.go -var _ = utils.SIGDescribe("Volume plugin streaming [Slow]", func() { - f := framework.NewDefaultFramework("volume-io") - var ( - config framework.VolumeTestConfig - cs clientset.Interface - ns string - serverIP string - serverPod *v1.Pod - volSource v1.VolumeSource - ) - - BeforeEach(func() { - cs = f.ClientSet - ns = f.Namespace.Name - }) - - //////////////////////////////////////////////////////////////////////// - // NFS - //////////////////////////////////////////////////////////////////////// - Describe("NFS", func() { - testFile := "nfs_io_test" - // client pod uses selinux - podSec := v1.PodSecurityContext{ - SELinuxOptions: &v1.SELinuxOptions{ - Level: "s0:c0,c1", - }, - } - - BeforeEach(func() { - config, serverPod, serverIP = framework.NewNFSServer(cs, ns, []string{}) - volSource = v1.VolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: serverIP, - Path: "/", - ReadOnly: false, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting NFS server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - Expect(err).NotTo(HaveOccurred(), "AfterEach: NFS server pod failed to delete") - }) - - It("should write files of various sizes, verify size, validate content", func() { - fileSizes := []int64{fileSizeSmall, fileSizeMedium, fileSizeLarge} - err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes) - Expect(err).NotTo(HaveOccurred()) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // Gluster - //////////////////////////////////////////////////////////////////////// - Describe("GlusterFS", func() { - var name string - testFile := "gluster_io_test" - - BeforeEach(func() { - framework.SkipUnlessNodeOSDistroIs("gci") - // create gluster server and endpoints - config, serverPod, serverIP = framework.NewGlusterfsServer(cs, ns) - name = config.Prefix + "-server" - volSource = v1.VolumeSource{ - Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: name, - // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh - Path: "test_vol", - ReadOnly: false, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting Gluster endpoints %q...", name) - epErr := cs.CoreV1().Endpoints(ns).Delete(name, nil) - framework.Logf("AfterEach: deleting Gluster server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - if epErr != nil || err != nil { - if epErr != nil { - framework.Logf("AfterEach: Gluster delete endpoints failed: %v", err) - } - if err != nil { - framework.Logf("AfterEach: Gluster server pod delete failed: %v", err) - } - framework.Failf("AfterEach: cleanup failed") - } - }) - - It("should write files of various sizes, verify size, validate content", func() { - fileSizes := []int64{fileSizeSmall, fileSizeMedium} - err := testVolumeIO(f, cs, config, volSource, nil /*no secContext*/, testFile, fileSizes) - Expect(err).NotTo(HaveOccurred()) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // iSCSI - // The iscsiadm utility and iscsi target kernel modules must be installed on all nodes. 
- //////////////////////////////////////////////////////////////////////// - Describe("iSCSI [Feature:Volumes]", func() { - testFile := "iscsi_io_test" - - BeforeEach(func() { - config, serverPod, serverIP = framework.NewISCSIServer(cs, ns) - volSource = v1.VolumeSource{ - ISCSI: &v1.ISCSIVolumeSource{ - TargetPortal: serverIP + ":3260", - // from test/images/volumes-tester/iscsi/initiatorname.iscsi - IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", - Lun: 0, - FSType: "ext2", - ReadOnly: false, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting iSCSI server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - Expect(err).NotTo(HaveOccurred(), "AfterEach: iSCSI server pod failed to delete") - }) - - It("should write files of various sizes, verify size, validate content", func() { - fileSizes := []int64{fileSizeSmall, fileSizeMedium} - fsGroup := int64(1234) - podSec := v1.PodSecurityContext{ - FSGroup: &fsGroup, - } - err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes) - Expect(err).NotTo(HaveOccurred()) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // Ceph RBD - //////////////////////////////////////////////////////////////////////// - Describe("Ceph-RBD [Feature:Volumes]", func() { - var ( - secret *v1.Secret - ) - testFile := "ceph-rbd_io_test" - - BeforeEach(func() { - config, serverPod, secret, serverIP = framework.NewRBDServer(cs, ns) - volSource = v1.VolumeSource{ - RBD: &v1.RBDVolumeSource{ - CephMonitors: []string{serverIP}, - RBDPool: "rbd", - RBDImage: "foo", - RadosUser: "admin", - SecretRef: &v1.LocalObjectReference{ - Name: secret.Name, - }, - FSType: "ext2", - ReadOnly: false, - }, - } - }) - - AfterEach(func() { - framework.Logf("AfterEach: deleting Ceph-RDB server secret %q...", secret.Name) - secErr := cs.CoreV1().Secrets(ns).Delete(secret.Name, &metav1.DeleteOptions{}) - framework.Logf("AfterEach: deleting Ceph-RDB server pod %q...", serverPod.Name) - err := framework.DeletePodWithWait(f, cs, serverPod) - if secErr != nil || err != nil { - if secErr != nil { - framework.Logf("AfterEach: Ceph-RDB delete secret failed: %v", secErr) - } - if err != nil { - framework.Logf("AfterEach: Ceph-RDB server pod delete failed: %v", err) - } - framework.Failf("AfterEach: cleanup failed") - } - }) - - It("should write files of various sizes, verify size, validate content", func() { - fileSizes := []int64{fileSizeSmall, fileSizeMedium} - fsGroup := int64(1234) - podSec := v1.PodSecurityContext{ - FSGroup: &fsGroup, - } - err := testVolumeIO(f, cs, config, volSource, &podSec, testFile, fileSizes) - Expect(err).NotTo(HaveOccurred()) - }) - }) -}) diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index 229e1954e80..ce2bb1ed700 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -1105,7 +1105,7 @@ func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr verifyDefaultStorageClass(c, scName, expectedDefault) } -func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim { +func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim { claim := v1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "pvc-", @@ -1117,7 +1117,7 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim { }, Resources: v1.ResourceRequirements{ Requests: v1.ResourceList{ - 
v1.ResourceName(v1.ResourceStorage): resource.MustParse(t.claimSize), + v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize), }, }, }, @@ -1126,6 +1126,10 @@ func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim { return &claim } +func newClaim(t storageClassTest, ns, suffix string) *v1.PersistentVolumeClaim { + return getClaim(t.claimSize, ns) +} + // runInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. func runInPodWithVolume(c clientset.Interface, ns, claimName, nodeName, command string) { pod := &v1.Pod{ @@ -1217,6 +1221,20 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor if t.delayBinding { bindingMode = storage.VolumeBindingWaitForFirstConsumer } + return getStorageClass(pluginName, t.parameters, &bindingMode, ns, suffix) +} + +func getStorageClass( + provisioner string, + parameters map[string]string, + bindingMode *storage.VolumeBindingMode, + ns string, + suffix string, +) *storage.StorageClass { + if bindingMode == nil { + defaultBindingMode := storage.VolumeBindingImmediate + bindingMode = &defaultBindingMode + } return &storage.StorageClass{ TypeMeta: metav1.TypeMeta{ Kind: "StorageClass", @@ -1225,9 +1243,9 @@ func newStorageClass(t storageClassTest, ns string, suffix string) *storage.Stor // Name must be unique, so let's base it on namespace name Name: ns + "-" + suffix, }, - Provisioner: pluginName, - Parameters: t.parameters, - VolumeBindingMode: &bindingMode, + Provisioner: provisioner, + Parameters: parameters, + VolumeBindingMode: bindingMode, } } diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index 1dae5342fd3..51f2ccb7ebb 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -14,72 +14,18 @@ See the License for the specific language governing permissions and limitations under the License. */ -/* - * This test checks that various VolumeSources are working. - * - * There are two ways, how to test the volumes: - * 1) With containerized server (NFS, Ceph, Gluster, iSCSI, ...) - * The test creates a server pod, exporting simple 'index.html' file. - * Then it uses appropriate VolumeSource to import this file into a client pod - * and checks that the pod can see the file. It does so by importing the file - * into web server root and loadind the index.html from it. - * - * These tests work only when privileged containers are allowed, exporting - * various filesystems (NFS, GlusterFS, ...) usually needs some mounting or - * other privileged magic in the server pod. - * - * Note that the server containers are for testing purposes only and should not - * be used in production. - * - * 2) With server outside of Kubernetes (Cinder, ...) - * Appropriate server (e.g. OpenStack Cinder) must exist somewhere outside - * the tested Kubernetes cluster. The test itself creates a new volume, - * and checks, that Kubernetes can use it as a volume. - */ - -// test/e2e/common/volumes.go duplicates the GlusterFS test from this file. Any changes made to this -// test should be made there as well. - +// This test is volumes test for configmap. package storage import ( - "os/exec" - "strings" - "time" - . "github.com/onsi/ginkgo" - . 
"github.com/onsi/gomega" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" - kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/storage/utils" - vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere" ) -func DeleteCinderVolume(name string) error { - // Try to delete the volume for several seconds - it takes - // a while for the plugin to detach it. - var output []byte - var err error - timeout := time.Second * 120 - - framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name) - for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) { - output, err = exec.Command("cinder", "delete", name).CombinedOutput() - if err == nil { - framework.Logf("Cinder volume %s deleted", name) - return nil - } else { - framework.Logf("Failed to delete volume %s: %v", name, err) - } - } - framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:])) - return err -} - // These tests need privileged containers, which are disabled by default. var _ = utils.SIGDescribe("Volumes", func() { f := framework.NewDefaultFramework("volume") @@ -94,277 +40,6 @@ var _ = utils.SIGDescribe("Volumes", func() { namespace = f.Namespace }) - //////////////////////////////////////////////////////////////////////// - // NFS - //////////////////////////////////////////////////////////////////////// - - Describe("NFS", func() { - It("should be mountable", func() { - config, _, serverIP := framework.NewNFSServer(cs, namespace.Name, []string{}) - defer framework.VolumeTestCleanup(f, config) - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - NFS: &v1.NFSVolumeSource{ - Server: serverIP, - Path: "/", - ReadOnly: true, - }, - }, - File: "index.html", - // Must match content of test/images/volumes-tester/nfs/index.html - ExpectedContent: "Hello from NFS!", - }, - } - framework.TestVolumeClient(cs, config, nil, tests) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // Gluster - //////////////////////////////////////////////////////////////////////// - - Describe("GlusterFS", func() { - It("should be mountable", func() { - //TODO (copejon) GFS is not supported on debian image. - framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu", "custom") - - // create gluster server and endpoints - config, _, _ := framework.NewGlusterfsServer(cs, namespace.Name) - name := config.Prefix + "-server" - defer func() { - framework.VolumeTestCleanup(f, config) - err := cs.CoreV1().Endpoints(namespace.Name).Delete(name, nil) - Expect(err).NotTo(HaveOccurred(), "defer: Gluster delete endpoints failed") - }() - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - Glusterfs: &v1.GlusterfsVolumeSource{ - EndpointsName: name, - // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh - Path: "test_vol", - ReadOnly: true, - }, - }, - File: "index.html", - // Must match content of test/images/volumes-tester/gluster/index.html - ExpectedContent: "Hello from GlusterFS!", - }, - } - framework.TestVolumeClient(cs, config, nil, tests) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // iSCSI - //////////////////////////////////////////////////////////////////////// - - // The test needs privileged containers, which are disabled by default. 
- // Also, make sure that iscsiadm utility and iscsi target kernel modules - // are installed on all nodes! - // Run the test with "go run hack/e2e.go ... --ginkgo.focus=iSCSI" - - Describe("iSCSI [Feature:Volumes]", func() { - It("should be mountable", func() { - config, _, serverIP := framework.NewISCSIServer(cs, namespace.Name) - defer framework.VolumeTestCleanup(f, config) - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - ISCSI: &v1.ISCSIVolumeSource{ - TargetPortal: serverIP + ":3260", - // from test/images/volumes-tester/iscsi/initiatorname.iscsi - IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c", - Lun: 0, - FSType: "ext2", - }, - }, - File: "index.html", - // Must match content of test/images/volumes-tester/iscsi/block.tar.gz - ExpectedContent: "Hello from iSCSI", - }, - } - fsGroup := int64(1234) - framework.TestVolumeClient(cs, config, &fsGroup, tests) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // Ceph RBD - //////////////////////////////////////////////////////////////////////// - - Describe("Ceph RBD [Feature:Volumes]", func() { - It("should be mountable", func() { - config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name) - defer framework.VolumeTestCleanup(f, config) - defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil) - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - RBD: &v1.RBDVolumeSource{ - CephMonitors: []string{serverIP}, - RBDPool: "rbd", - RBDImage: "foo", - RadosUser: "admin", - SecretRef: &v1.LocalObjectReference{ - Name: secret.Name, - }, - FSType: "ext2", - }, - }, - File: "index.html", - // Must match content of test/images/volumes-tester/rbd/create_block.sh - ExpectedContent: "Hello from RBD", - }, - } - fsGroup := int64(1234) - framework.TestVolumeClient(cs, config, &fsGroup, tests) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // Ceph - //////////////////////////////////////////////////////////////////////// - Describe("CephFS [Feature:Volumes]", func() { - It("should be mountable", func() { - config, _, secret, serverIP := framework.NewRBDServer(cs, namespace.Name) - defer framework.VolumeTestCleanup(f, config) - defer cs.CoreV1().Secrets(config.Namespace).Delete(secret.Name, nil) - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - CephFS: &v1.CephFSVolumeSource{ - Monitors: []string{serverIP + ":6789"}, - User: "kube", - SecretRef: &v1.LocalObjectReference{Name: secret.Name}, - ReadOnly: true, - }, - }, - File: "index.html", - // Must match content of test/images/volumes-tester/ceph/index.html - ExpectedContent: "Hello Ceph!", - }, - } - framework.TestVolumeClient(cs, config, nil, tests) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // OpenStack Cinder - //////////////////////////////////////////////////////////////////////// - - // This test assumes that OpenStack client tools are installed - // (/usr/bin/nova, /usr/bin/cinder and /usr/bin/keystone) - // and that the usual OpenStack authentication env. variables are set - // (OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME at least). 
- Describe("Cinder", func() { - It("should be mountable", func() { - framework.SkipUnlessProviderIs("openstack") - config := framework.VolumeTestConfig{ - Namespace: namespace.Name, - Prefix: "cinder", - } - - // We assume that namespace.Name is a random string - volumeName := namespace.Name - By("creating a test Cinder volume") - output, err := exec.Command("cinder", "create", "--display-name="+volumeName, "1").CombinedOutput() - outputString := string(output[:]) - framework.Logf("cinder output:\n%s", outputString) - Expect(err).NotTo(HaveOccurred()) - - defer DeleteCinderVolume(volumeName) - - // Parse 'id'' from stdout. Expected format: - // | attachments | [] | - // | availability_zone | nova | - // ... - // | id | 1d6ff08f-5d1c-41a4-ad72-4ef872cae685 | - volumeID := "" - for _, line := range strings.Split(outputString, "\n") { - fields := strings.Fields(line) - if len(fields) != 5 { - continue - } - if fields[1] != "id" { - continue - } - volumeID = fields[3] - break - } - framework.Logf("Volume ID: %s", volumeID) - Expect(volumeID).NotTo(Equal("")) - - defer func() { - framework.Logf("Running volumeTestCleanup") - framework.VolumeTestCleanup(f, config) - }() - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - Cinder: &v1.CinderVolumeSource{ - VolumeID: volumeID, - FSType: "ext3", - ReadOnly: false, - }, - }, - File: "index.html", - // Randomize index.html to make sure we don't see the - // content from previous test runs. - ExpectedContent: "Hello from Cinder from namespace " + volumeName, - }, - } - - framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent) - - fsGroup := int64(1234) - framework.TestVolumeClient(cs, config, &fsGroup, tests) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // GCE PD - //////////////////////////////////////////////////////////////////////// - Describe("PD", func() { - var config framework.VolumeTestConfig - - BeforeEach(func() { - framework.SkipUnlessProviderIs("gce", "gke") - config = framework.VolumeTestConfig{ - Namespace: namespace.Name, - Prefix: "pd", - // PD will be created in framework.TestContext.CloudConfig.Zone zone, - // so pods should be also scheduled there. 
- NodeSelector: map[string]string{ - kubeletapis.LabelZoneFailureDomain: framework.TestContext.CloudConfig.Zone, - }, - } - }) - - It("should be mountable with ext3", func() { - testGCEPD(f, config, cs, "ext3") - }) - It("should be mountable with ext4", func() { - testGCEPD(f, config, cs, "ext4") - }) - It("should be mountable with xfs", func() { - // xfs is not supported on gci - // and not installed by default on debian - framework.SkipUnlessNodeOSDistroIs("ubuntu", "custom") - testGCEPD(f, config, cs, "xfs") - }) - }) - - //////////////////////////////////////////////////////////////////////// - // ConfigMap - //////////////////////////////////////////////////////////////////////// Describe("ConfigMap", func() { It("should be mountable", func() { config := framework.VolumeTestConfig{ @@ -434,139 +109,4 @@ var _ = utils.SIGDescribe("Volumes", func() { framework.TestVolumeClient(cs, config, nil, tests) }) }) - - //////////////////////////////////////////////////////////////////////// - // vSphere - //////////////////////////////////////////////////////////////////////// - Describe("vsphere", func() { - It("should be mountable", func() { - framework.SkipUnlessProviderIs("vsphere") - vspheretest.Bootstrap(f) - nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo() - var volumePath string - config := framework.VolumeTestConfig{ - Namespace: namespace.Name, - Prefix: "vsphere", - } - volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef) - Expect(err).NotTo(HaveOccurred()) - - defer func() { - nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) - }() - - defer func() { - framework.Logf("Running volumeTestCleanup") - framework.VolumeTestCleanup(f, config) - }() - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{ - VolumePath: volumePath, - FSType: "ext4", - }, - }, - File: "index.html", - // Randomize index.html to make sure we don't see the - // content from previous test runs. - ExpectedContent: "Hello from vSphere from namespace " + namespace.Name, - }, - } - - framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent) - - fsGroup := int64(1234) - framework.TestVolumeClient(cs, config, &fsGroup, tests) - }) - }) - - //////////////////////////////////////////////////////////////////////// - // Azure Disk - //////////////////////////////////////////////////////////////////////// - Describe("Azure Disk", func() { - It("should be mountable [Slow]", func() { - framework.SkipUnlessProviderIs("azure") - config := framework.VolumeTestConfig{ - Namespace: namespace.Name, - Prefix: "azure", - } - - By("creating a test azure disk volume") - volumeName, err := framework.CreatePDWithRetry() - Expect(err).NotTo(HaveOccurred()) - defer func() { - framework.DeletePDWithRetry(volumeName) - }() - - defer func() { - framework.Logf("Running volumeTestCleanup") - framework.VolumeTestCleanup(f, config) - }() - fsType := "ext4" - readOnly := false - diskName := volumeName[(strings.LastIndex(volumeName, "/") + 1):] - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - AzureDisk: &v1.AzureDiskVolumeSource{ - DiskName: diskName, - DataDiskURI: volumeName, - FSType: &fsType, - ReadOnly: &readOnly, - }, - }, - File: "index.html", - // Randomize index.html to make sure we don't see the - // content from previous test runs. 
- ExpectedContent: "Hello from Azure from namespace " + volumeName, - }, - } - - framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent) - - fsGroup := int64(1234) - framework.TestVolumeClient(cs, config, &fsGroup, tests) - }) - }) }) - -func testGCEPD(f *framework.Framework, config framework.VolumeTestConfig, cs clientset.Interface, fs string) { - By("creating a test gce pd volume") - volumeName, err := framework.CreatePDWithRetry() - Expect(err).NotTo(HaveOccurred()) - defer func() { - // - Get NodeName from the pod spec to which the volume is mounted. - // - Force detach and delete. - pod, err := f.PodClient().Get(config.Prefix+"-client", metav1.GetOptions{}) - Expect(err).NotTo(HaveOccurred(), "Failed getting pod %q.", config.Prefix+"-client") - detachAndDeletePDs(volumeName, []types.NodeName{types.NodeName(pod.Spec.NodeName)}) - }() - - defer func() { - framework.Logf("Running volumeTestCleanup") - framework.VolumeTestCleanup(f, config) - }() - - tests := []framework.VolumeTest{ - { - Volume: v1.VolumeSource{ - GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ - PDName: volumeName, - FSType: fs, - ReadOnly: false, - }, - }, - File: "index.html", - // Randomize index.html to make sure we don't see the - // content from previous test runs. - ExpectedContent: "Hello from GCE from namespace " + volumeName, - }, - } - - framework.InjectHtml(cs, config, tests[0].Volume, tests[0].ExpectedContent) - - fsGroup := int64(1234) - framework.TestVolumeClient(cs, config, &fsGroup, tests) -}
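
Not part of the patch: a minimal usage sketch of how the helpers touched above (getClaim and getStorageClass from volume_provisioning.go, and framework.CreateSecPodWithNodeName) are meant to be composed by a driver-specific test. It assumes the code lives in the same test/e2e/storage package as those unexported helpers and that the signatures stay as shown in this diff; the function name, provisioner string, and 2Gi size are illustrative placeholders only.

package storage

import (
    "k8s.io/api/core/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// exampleProvisionAndRunOnNode is an illustrative sketch: create a StorageClass
// and a 2Gi claim with the refactored helpers, then start an unprivileged
// client pod pinned to nodeName with the claim attached.
func exampleProvisionAndRunOnNode(cs clientset.Interface, ns, nodeName string) error {
    // nil bindingMode -> getStorageClass falls back to VolumeBindingImmediate.
    sc := getStorageClass("kubernetes.io/gce-pd" /* placeholder provisioner */, nil, nil, ns, "example")
    if _, err := cs.StorageV1().StorageClasses().Create(sc); err != nil {
        return err
    }

    pvc := getClaim("2Gi", ns)
    pvc.Spec.StorageClassName = &sc.Name
    pvc, err := cs.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
    if err != nil {
        return err
    }

    // Same call pattern as the volume mode tests in this diff: no privileges,
    // default command, no hostIPC/hostPID, no fsGroup.
    pod, err := framework.CreateSecPodWithNodeName(cs, ns,
        []*v1.PersistentVolumeClaim{pvc},
        false, "", false, false,
        framework.SELinuxLabel, nil, nodeName, framework.PodStartTimeout)
    if err != nil {
        return err
    }
    framework.Logf("pod %q is running on node %q with claim %q", pod.Name, nodeName, pvc.Name)
    return nil
}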