CSIStorageCapacity: E2E test with mock driver
We can create CSIStorageCapacity objects manually, so we don't need the updated external-provisioner for these tests.
commit 567ce87aee
parent 0efbbe8555
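The approach in a nutshell: instead of waiting for an updated external-provisioner to publish capacity information, the test publishes CSIStorageCapacity objects itself through the client-go API. A minimal standalone sketch of that call, condensed from the test code in the diff below (the package, function name, and the 100Gi value are illustrative, not part of the commit):

    package capacityexample

    import (
        "context"

        storagev1alpha1 "k8s.io/api/storage/v1alpha1"
        "k8s.io/apimachinery/pkg/api/resource"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createFakeCapacity publishes a CSIStorageCapacity object claiming that
    // the given storage class has 100Gi available on every node.
    func createFakeCapacity(cs kubernetes.Interface, namespace, scName string) (*storagev1alpha1.CSIStorageCapacity, error) {
        quantity := resource.MustParse("100Gi")
        capacity := &storagev1alpha1.CSIStorageCapacity{
            ObjectMeta: metav1.ObjectMeta{GenerateName: "fake-capacity-"},
            // Empty topology, usable by any node.
            StorageClassName: scName,
            NodeTopology:     &metav1.LabelSelector{},
            Capacity:         &quantity,
        }
        return cs.StorageV1alpha1().CSIStorageCapacities(namespace).Create(context.Background(), capacity, metav1.CreateOptions{})
    }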
@@ -45,6 +45,7 @@ go_library(
         "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
         "//staging/src/k8s.io/api/rbac/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -28,6 +28,7 @@ import (
     "google.golang.org/grpc/codes"
     v1 "k8s.io/api/core/v1"
     storagev1 "k8s.io/api/storage/v1"
+    storagev1alpha1 "k8s.io/api/storage/v1alpha1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -89,6 +90,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
         lateBinding         bool
         enableTopology      bool
         podInfo             *bool
+        storageCapacity     *bool
         scName              string
         enableResizing      bool // enable resizing for both CSI mock driver and storageClass.
         enableNodeExpansion bool // enable node expansion for CSI mock driver
@@ -124,6 +126,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
         driverOpts := drivers.CSIMockDriverOpts{
             RegisterDriver:  tp.registerDriver,
             PodInfo:         tp.podInfo,
+            StorageCapacity: tp.storageCapacity,
             EnableTopology:  tp.enableTopology,
             AttachLimit:     tp.attachLimit,
             DisableAttach:   tp.disableAttach,
@@ -218,10 +221,13 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             ginkgo.By(fmt.Sprintf("Deleting claim %s", claim.Name))
             claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
             if err == nil {
-                cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{})
+                if err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}); err != nil {
+                    errs = append(errs, err)
+                }
+                if claim.Spec.VolumeName != "" {
                     errs = append(errs, e2epv.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute))
                 }
+            }
         }

         for _, sc := range m.sc {
@@ -944,6 +950,94 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             })
         }
     })

+    // These tests *only* work on a cluster which has the CSIStorageCapacity feature enabled.
+    ginkgo.Context("CSIStorageCapacity [Feature:CSIStorageCapacity]", func() {
+        var (
+            err error
+            yes = true
+            no  = false
+        )
+        // Tests that expect a failure are slow because we have to wait for a while
+        // to be sure that the volume isn't getting created.
+        // TODO: stop waiting as soon as we see the "node(s) did not have enough free storage" pod event?
+        tests := []struct {
+            name            string
+            storageCapacity *bool
+            capacities      []string
+            expectFailure   bool
+        }{
+            {
+                name: "CSIStorageCapacity unused",
+            },
+            {
+                name:            "CSIStorageCapacity disabled",
+                storageCapacity: &no,
+            },
+            {
+                name:            "CSIStorageCapacity used, no capacity [Slow]",
+                storageCapacity: &yes,
+                expectFailure:   true,
+            },
+            {
+                name:            "CSIStorageCapacity used, have capacity",
+                storageCapacity: &yes,
+                capacities:      []string{"100Gi"},
+            },
+            // We could add more test cases here for
+            // various situations, but covering those via
+            // the scheduler binder unit tests is faster.
+        }
+        for _, t := range tests {
+            test := t
+            ginkgo.It(t.name, func() {
+                init(testParameters{
+                    registerDriver:  true,
+                    scName:          "csi-mock-sc-" + f.UniqueName,
+                    storageCapacity: test.storageCapacity,
+                    lateBinding:     true,
+                })
+                defer cleanup()
+
+                // kube-scheduler may need some time before it gets the CSIDriver object.
+                // Without it, scheduling will happen without considering capacity, which
+                // is not what we want to test.
+                time.Sleep(5 * time.Second)
+
+                sc, _, pod := createPod(false /* persistent volume, late binding as specified above */)
+
+                for _, capacityStr := range test.capacities {
+                    capacityQuantity := resource.MustParse(capacityStr)
+                    capacity := &storagev1alpha1.CSIStorageCapacity{
+                        ObjectMeta: metav1.ObjectMeta{
+                            GenerateName: "fake-capacity-",
+                        },
+                        // Empty topology, usable by any node.
+                        StorageClassName: sc.Name,
+                        NodeTopology:     &metav1.LabelSelector{},
+                        Capacity:         &capacityQuantity,
+                    }
+                    createdCapacity, err := f.ClientSet.StorageV1alpha1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
+                    framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
+                    m.testCleanups = append(m.testCleanups, func() {
+                        f.ClientSet.StorageV1alpha1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
+                    })
+                }
+
+                err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
+                if test.expectFailure {
+                    framework.ExpectError(err, "pod unexpectedly started to run")
+                } else {
+                    framework.ExpectNoError(err, "failed to start pod")
+                }
+
+                ginkgo.By("Deleting the previously created pod")
+                err = e2epod.DeletePodWithWait(m.cs, pod)
+                framework.ExpectNoError(err, "while deleting")
+            })
+        }
+    })
 })

 func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
@@ -238,6 +238,7 @@ type mockCSIDriver struct {
     driverInfo      testsuites.DriverInfo
     manifests       []string
     podInfo         *bool
+    storageCapacity *bool
     attachable      bool
     attachLimit     int
     enableTopology  bool
@@ -251,6 +252,7 @@ type CSIMockDriverOpts struct {
     RegisterDriver  bool
     DisableAttach   bool
     PodInfo         *bool
+    StorageCapacity *bool
     AttachLimit     int
     EnableTopology  bool
     EnableResizing  bool
@@ -301,6 +303,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
         },
         manifests:       driverManifests,
         podInfo:         driverOpts.PodInfo,
+        storageCapacity: driverOpts.StorageCapacity,
         enableTopology:  driverOpts.EnableTopology,
         attachable:      !driverOpts.DisableAttach,
         attachLimit:     driverOpts.AttachLimit,
@@ -392,6 +395,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
         ProvisionerContainerName: "csi-provisioner",
         NodeName:                 node.Name,
         PodInfo:                  m.podInfo,
+        StorageCapacity:          m.storageCapacity,
         CanAttach:                &m.attachable,
         VolumeLifecycleModes: &[]storagev1.VolumeLifecycleMode{
             storagev1.VolumeLifecyclePersistent,
@@ -124,6 +124,9 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
     if o.PodInfo != nil {
         object.Spec.PodInfoOnMount = o.PodInfo
     }
+    if o.StorageCapacity != nil {
+        object.Spec.StorageCapacity = o.StorageCapacity
+    }
     if o.CanAttach != nil {
         object.Spec.AttachRequired = o.CanAttach
     }
@@ -169,6 +172,10 @@ type PatchCSIOptions struct {
     // field *if* the driver deploys a CSIDriver object. Ignored
     // otherwise.
    CanAttach *bool
+    // If not nil, the value to use for the CSIDriver.Spec.StorageCapacity
+    // field *if* the driver deploys a CSIDriver object. Ignored
+    // otherwise.
+    StorageCapacity *bool
     // If not nil, the value to use for the CSIDriver.Spec.VolumeLifecycleModes
     // field *if* the driver deploys a CSIDriver object. Ignored
     // otherwise.
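For context, the CSIDriver object that PatchCSIDeployment ends up producing when the new StorageCapacity option is set looks roughly like the sketch below. This is a hedged illustration against the storage.k8s.io/v1 API, not code from the commit; the driver name and helper are placeholders:

    package example

    import (
        storagev1 "k8s.io/api/storage/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // newMockCSIDriver returns a CSIDriver whose spec opts in to capacity-aware
    // scheduling: kube-scheduler consults CSIStorageCapacity objects before
    // binding late-bound volumes of this driver.
    func newMockCSIDriver(storageCapacity bool) *storagev1.CSIDriver {
        return &storagev1.CSIDriver{
            ObjectMeta: metav1.ObjectMeta{Name: "csi-mock"},
            Spec: storagev1.CSIDriverSpec{
                StorageCapacity: &storageCapacity,
            },
        }
    }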