diff --git a/test/e2e/storage/BUILD b/test/e2e/storage/BUILD
index fa0775cbe5e..adc8f943bea 100644
--- a/test/e2e/storage/BUILD
+++ b/test/e2e/storage/BUILD
@@ -45,6 +45,7 @@ go_library(
         "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
         "//staging/src/k8s.io/api/rbac/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go
index bebde13d776..3ca6f4be7ae 100644
--- a/test/e2e/storage/csi_mock_volume.go
+++ b/test/e2e/storage/csi_mock_volume.go
@@ -28,6 +28,7 @@ import (
 	"google.golang.org/grpc/codes"
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
+	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -89,6 +90,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		lateBinding         bool
 		enableTopology      bool
 		podInfo             *bool
+		storageCapacity     *bool
 		scName              string
 		enableResizing      bool // enable resizing for both CSI mock driver and storageClass.
 		enableNodeExpansion bool // enable node expansion for CSI mock driver
@@ -124,6 +126,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		driverOpts := drivers.CSIMockDriverOpts{
 			RegisterDriver:      tp.registerDriver,
 			PodInfo:             tp.podInfo,
+			StorageCapacity:     tp.storageCapacity,
 			EnableTopology:      tp.enableTopology,
 			AttachLimit:         tp.attachLimit,
 			DisableAttach:       tp.disableAttach,
@@ -218,10 +221,13 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name))
 			claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
 			if err == nil {
-				cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{})
-				errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute))
+				if err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}); err != nil {
+					errs = append(errs, err)
+				}
+				if claim.Spec.VolumeName != "" {
+					errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute))
+				}
 			}
-
 		}
 
 		for _, sc := range m.sc {
@@ -944,6 +950,94 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			})
 		}
 	})
+
+	// These tests *only* work on a cluster which has the CSIStorageCapacity feature enabled.
+	ginkgo.Context("CSIStorageCapacity [Feature:CSIStorageCapacity]", func() {
+		var (
+			err error
+			yes = true
+			no  = false
+		)
+		// Tests that expect a failure are slow because we have to wait for a while
+		// to be sure that the volume isn't getting created.
+		// TODO: stop waiting as soon as we see the "node(s) did not have enough free storage" pod event?
+		tests := []struct {
+			name            string
+			storageCapacity *bool
+			capacities      []string
+			expectFailure   bool
+		}{
+			{
+				name: "CSIStorageCapacity unused",
+			},
+			{
+				name:            "CSIStorageCapacity disabled",
+				storageCapacity: &no,
+			},
+			{
+				name:            "CSIStorageCapacity used, no capacity [Slow]",
+				storageCapacity: &yes,
+				expectFailure:   true,
+			},
+			{
+				name:            "CSIStorageCapacity used, have capacity",
+				storageCapacity: &yes,
+				capacities:      []string{"100Gi"},
+			},
+			// We could add more test cases here for
+			// various situations, but covering those via
+			// the scheduler binder unit tests is faster.
+		}
+		for _, t := range tests {
+			test := t
+			ginkgo.It(t.name, func() {
+				init(testParameters{
+					registerDriver:  true,
+					scName:          "csi-mock-sc-" + f.UniqueName,
+					storageCapacity: test.storageCapacity,
+					lateBinding:     true,
+				})
+
+				defer cleanup()
+
+				// kube-scheduler may need some time before it gets the CSIDriver object.
+				// Without it, scheduling will happen without considering capacity, which
+				// is not what we want to test.
+				time.Sleep(5 * time.Second)
+
+				sc, _, pod := createPod(false /* persistent volume, late binding as specified above */)
+
+				for _, capacityStr := range test.capacities {
+					capacityQuantity := resource.MustParse(capacityStr)
+					capacity := &storagev1alpha1.CSIStorageCapacity{
+						ObjectMeta: metav1.ObjectMeta{
+							GenerateName: "fake-capacity-",
+						},
+						// Empty topology, usable by any node.
+						StorageClassName: sc.Name,
+						NodeTopology:     &metav1.LabelSelector{},
+						Capacity:         &capacityQuantity,
+					}
+					createdCapacity, err := f.ClientSet.StorageV1alpha1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+					framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
+					m.testCleanups = append(m.testCleanups, func() {
+						f.ClientSet.StorageV1alpha1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
+					})
+				}
+
+				err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
+				if test.expectFailure {
+					framework.ExpectError(err, "pod unexpectedly started to run")
+				} else {
+					framework.ExpectNoError(err, "failed to start pod")
+				}
+
+				ginkgo.By("Deleting the previously created pod")
+				err = e2epod.DeletePodWithWait(m.cs, pod)
+				framework.ExpectNoError(err, "while deleting")
+			})
+		}
+	})
 })
 
 func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index ebdddd0cc6e..88b9ef7a888 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -238,6 +238,7 @@ type mockCSIDriver struct {
 	driverInfo          testsuites.DriverInfo
 	manifests           []string
 	podInfo             *bool
+	storageCapacity     *bool
 	attachable          bool
 	attachLimit         int
 	enableTopology      bool
@@ -251,6 +252,7 @@ type CSIMockDriverOpts struct {
 	RegisterDriver      bool
 	DisableAttach       bool
 	PodInfo             *bool
+	StorageCapacity     *bool
 	AttachLimit         int
 	EnableTopology      bool
 	EnableResizing      bool
@@ -301,6 +303,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
 		},
 		manifests:           driverManifests,
 		podInfo:             driverOpts.PodInfo,
+		storageCapacity:     driverOpts.StorageCapacity,
 		enableTopology:      driverOpts.EnableTopology,
 		attachable:          !driverOpts.DisableAttach,
 		attachLimit:         driverOpts.AttachLimit,
@@ -392,6 +395,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 		ProvisionerContainerName: "csi-provisioner",
 		NodeName:                 node.Name,
 		PodInfo:                  m.podInfo,
+		StorageCapacity:          m.storageCapacity,
 		CanAttach:                &m.attachable,
 		VolumeLifecycleModes: &[]storagev1.VolumeLifecycleMode{
 			storagev1.VolumeLifecyclePersistent,
diff --git a/test/e2e/storage/utils/deployment.go b/test/e2e/storage/utils/deployment.go
index 98da5777742..2692e833405 100644
--- a/test/e2e/storage/utils/deployment.go
+++ b/test/e2e/storage/utils/deployment.go
@@ -124,6 +124,9 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
 		if o.PodInfo != nil {
 			object.Spec.PodInfoOnMount = o.PodInfo
 		}
+		if o.StorageCapacity != nil {
+			object.Spec.StorageCapacity = o.StorageCapacity
+		}
 		if o.CanAttach != nil {
 			object.Spec.AttachRequired = o.CanAttach
 		}
@@ -169,6 +172,10 @@ type PatchCSIOptions struct {
 	// field *if* the driver deploys a CSIDriver object. Ignored
 	// otherwise.
 	CanAttach *bool
+	// If not nil, the value to use for the CSIDriver.Spec.StorageCapacity
+	// field *if* the driver deploys a CSIDriver object. Ignored
+	// otherwise.
+	StorageCapacity *bool
 	// If not nil, the value to use for the CSIDriver.Spec.VolumeLifecycleModes
 	// field *if* the driver deploys a CSIDriver object. Ignored
 	// otherwise.
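
Usage sketch (not part of this diff): the helper below is hypothetical, but InitMockCSIDriver, CSIMockDriverOpts and the import paths come from the files changed above. It shows how a caller opts the mock driver into capacity tracking; PatchCSIDeployment then copies the value into the deployed CSIDriver object's StorageCapacity field, which is what the new "CSIStorageCapacity used, ..." test cases rely on.

package example

import (
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// newCapacityTrackingMockDriver (hypothetical helper) returns the mock CSI
// driver configured so that its CSIDriver object is deployed with storage
// capacity tracking enabled, mirroring what the new test cases do via
// testParameters.storageCapacity.
func newCapacityTrackingMockDriver() testsuites.TestDriver {
	storageCapacity := true
	return drivers.InitMockCSIDriver(drivers.CSIMockDriverOpts{
		RegisterDriver:  true,
		StorageCapacity: &storageCapacity,
	})
}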