Add end-to-end tests for Volume Group Snapshot

This commit introduces end-to-end tests for the Volume Group Snapshot feature:
creating a group snapshot from multiple labelled PVCs, waiting for it to become
ready, and restoring new PVCs from the member volume snapshots.
Addresses issue [kubernetes#1080](kubernetes-csi/external-snapshotter#1080).

Signed-off-by: Manish <myathnal@redhat.com>
This commit is contained in:
Manish 2024-08-20 16:06:59 +05:30
parent 1a2162fb75
commit 88f9f5250d
9 changed files with 482 additions and 11 deletions

View File

@@ -392,6 +392,10 @@ var (
// TODO: document the feature (owning SIG, when to use this feature for a test)
VolumeSnapshotDataSource = framework.WithFeature(framework.ValidFeatures.Add("VolumeSnapshotDataSource"))
// Owner: sig-storage
// Volume group snapshot tests
VolumeGroupSnapshotDataSource = framework.WithFeature(framework.ValidFeatures.Add("volumegroupsnapshot"))
// TODO: document the feature (owning SIG, when to use this feature for a test)
VolumeSourceXFS = framework.WithFeature(framework.ValidFeatures.Add("VolumeSourceXFS"))

View File

@@ -158,7 +158,7 @@ func InitHostPathCSIDriver() storageframework.TestDriver {
storageframework.CapReadWriteOncePod: true,
storageframework.CapMultiplePVsSameID: true,
storageframework.CapFSResizeFromSourceNotSupported: true,
storageframework.CapGroupSnapshot: true,
storageframework.CapVolumeGroupSnapshot: true,
// This is needed for the
// testsuites/volumelimits.go `should support volume limits`

View File

@@ -131,6 +131,12 @@ type SnapshottableTestDriver interface {
GetSnapshotClass(ctx context.Context, config *PerTestConfig, parameters map[string]string) *unstructured.Unstructured
}
type VolumeGroupSnapshottableTestDriver interface {
TestDriver
// GetVolumeGroupSnapshotClass returns a VolumeGroupSnapshotClass to create a group snapshot.
GetVolumeGroupSnapshotClass(ctx context.Context, config *PerTestConfig, parameters map[string]string) *unstructured.Unstructured
}
// VolumeAttributesClassTestDriver represents an interface for a TestDriver that supports
// creating and modifying volumes via VolumeAttributesClass objects
type VolumeAttributesClassTestDriver interface {
@@ -159,14 +165,14 @@ type Capability string
// Constants related to capabilities and behavior of the driver.
const (
CapPersistence Capability = "persistence" // data is persisted across pod restarts
CapBlock Capability = "block" // raw block mode
CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
CapVolumeMountGroup Capability = "volumeMountGroup" // Driver has the VolumeMountGroup CSI node capability. Because this is a FSGroup feature, the fsGroup capability must also be set to true.
CapExec Capability = "exec" // exec a file in the volume
CapSnapshotDataSource Capability = "snapshotDataSource" // support populate data from snapshot
CapGroupSnapshot Capability = "groupSnapshot" // support group snapshot
CapPVCDataSource Capability = "pvcDataSource" // support populate data from pvc
CapPersistence Capability = "persistence" // data is persisted across pod restarts
CapBlock Capability = "block" // raw block mode
CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
CapVolumeMountGroup Capability = "volumeMountGroup" // Driver has the VolumeMountGroup CSI node capability. Because this is a FSGroup feature, the fsGroup capability must also be set to true.
CapExec Capability = "exec" // exec a file in the volume
CapSnapshotDataSource Capability = "snapshotDataSource" // support populate data from snapshot
CapVolumeGroupSnapshot Capability = "groupSnapshot" // support group snapshot
CapPVCDataSource Capability = "pvcDataSource" // support populate data from pvc
// multiple pods on a node can use the same volume concurrently;
// for CSI, see:

View File

@@ -60,6 +60,8 @@ var (
DynamicCreatedSnapshot TestSnapshotType = "DynamicSnapshot"
// PreprovisionedCreatedSnapshot represents a snapshot type for pre-provisioned snapshot
PreprovisionedCreatedSnapshot TestSnapshotType = "PreprovisionedSnapshot"
// VolumeGroupSnapshot represents a snapshot type for a group of volume snapshots
VolumeGroupSnapshot TestSnapshotType = "VolumeGroupSnapshot"
)
// TestSnapshotDeletionPolicy represents the deletion policy of the snapshot class
@@ -318,6 +320,14 @@ var (
SnapshotDeletionPolicy: DeleteSnapshot,
VolType: DynamicPV,
}
// VolumeGroupSnapshotDelete is TestPattern for "VolumeGroupSnapshot"
VolumeGroupSnapshotDelete = TestPattern{
Name: " (delete policy)",
SnapshotType: VolumeGroupSnapshot,
SnapshotDeletionPolicy: DeleteSnapshot,
VolType: DynamicPV,
}
// PreprovisionedSnapshotDelete is TestPattern for "Pre-provisioned snapshot"
PreprovisionedSnapshotDelete = TestPattern{
Name: "Pre-provisioned Snapshot (delete policy)",

View File

@@ -0,0 +1,126 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"context"
"fmt"
"github.com/onsi/ginkgo/v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
func getVolumeGroupSnapshot(labels map[string]interface{}, ns, snapshotClassName string) *unstructured.Unstructured {
snapshot := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeGroupSnapshot",
"apiVersion": utils.VolumeGroupSnapshotAPIVersion,
"metadata": map[string]interface{}{
"generateName": "group-snapshot-",
"namespace": ns,
},
"spec": map[string]interface{}{
"volumeGroupSnapshotClassName": snapshotClassName,
"source": map[string]interface{}{
"selector": map[string]interface{}{
"matchLabels": labels,
},
},
},
},
}
return snapshot
}
// VolumeGroupSnapshotResource represents a volumegroupsnapshot class, a volumegroupsnapshot and its bound contents for a specific test case
type VolumeGroupSnapshotResource struct {
Config *PerTestConfig
Pattern TestPattern
Vgs *unstructured.Unstructured
Vgscontent *unstructured.Unstructured
Vgsclass *unstructured.Unstructured
}
// CreateVolumeGroupSnapshot creates a VolumeGroupSnapshotClass with given SnapshotDeletionPolicy and a VolumeGroupSnapshot
// from the VolumeGroupSnapshotClass using a dynamic client.
// Returns the unstructured VolumeGroupSnapshotClass and VolumeGroupSnapshot objects.
func CreateVolumeGroupSnapshot(ctx context.Context, sDriver VolumeGroupSnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, groupName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) (*unstructured.Unstructured, *unstructured.Unstructured) {
defer ginkgo.GinkgoRecover()
var err error
if pattern.SnapshotType != VolumeGroupSnapshot {
err = fmt.Errorf("SnapshotType must be set to VolumeGroupSnapshot")
framework.ExpectNoError(err, "SnapshotType is set to VolumeGroupSnapshot")
}
dc := config.Framework.DynamicClient
ginkgo.By("creating a VolumeGroupSnapshotClass")
gsclass := sDriver.GetVolumeGroupSnapshotClass(ctx, config, parameters)
if gsclass == nil {
framework.Failf("Failed to get volume group snapshot class based on test config")
}
gsclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String()
gsclass, err = dc.Resource(utils.VolumeGroupSnapshotClassGVR).Create(ctx, gsclass, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create volume group snapshot class")
gsclass, err = dc.Resource(utils.VolumeGroupSnapshotClassGVR).Get(ctx, gsclass.GetName(), metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get volume group snapshot class")
ginkgo.By("creating a dynamic VolumeGroupSnapshot")
// Prepare a dynamically provisioned group volume snapshot with certain data
volumeGroupSnapshot := getVolumeGroupSnapshot(map[string]interface{}{
"group": groupName,
}, pvcNamespace, gsclass.GetName())
volumeGroupSnapshot, err = dc.Resource(utils.VolumeGroupSnapshotGVR).Namespace(volumeGroupSnapshot.GetNamespace()).Create(ctx, volumeGroupSnapshot, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create volume group snapshot")
ginkgo.By("Waiting for group snapshot to be ready")
err = utils.WaitForVolumeGroupSnapshotReady(ctx, dc, volumeGroupSnapshot.GetNamespace(), volumeGroupSnapshot.GetName(), framework.Poll, timeouts.SnapshotCreate*10)
framework.ExpectNoError(err, "Group snapshot is not ready to use within the timeout")
ginkgo.By("Getting group snapshot and content")
volumeGroupSnapshot, err = dc.Resource(utils.VolumeGroupSnapshotGVR).Namespace(volumeGroupSnapshot.GetNamespace()).Get(ctx, volumeGroupSnapshot.GetName(), metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get volume group snapshot after creation")
return gsclass, volumeGroupSnapshot
}
// CleanupResource deletes the VolumeGroupSnapshotClass created for the test using a dynamic client.
func (r *VolumeGroupSnapshotResource) CleanupResource(ctx context.Context, timeouts *framework.TimeoutContext) error {
defer ginkgo.GinkgoRecover()
dc := r.Config.Framework.DynamicClient
err := dc.Resource(utils.VolumeGroupSnapshotClassGVR).Delete(ctx, r.Vgsclass.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete volume group snapshot class")
return nil
}
// CreateVolumeGroupSnapshotResource creates a VolumeGroupSnapshotResource object with the given parameters.
func CreateVolumeGroupSnapshotResource(ctx context.Context, sDriver VolumeGroupSnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, groupName string, pvcNamespace string, timeouts *framework.TimeoutContext, parameters map[string]string) *VolumeGroupSnapshotResource {
vgsclass, snapshot := CreateVolumeGroupSnapshot(ctx, sDriver, config, pattern, groupName, pvcNamespace, timeouts, parameters)
vgs := &VolumeGroupSnapshotResource{
Config: config,
Pattern: pattern,
Vgs: snapshot,
Vgsclass: vgsclass,
Vgscontent: nil,
}
return vgs
}
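For context, a minimal usage sketch of the helpers above. It assumes the same package and imports as this file, a driver that implements VolumeGroupSnapshottableTestDriver, and a PerTestConfig prepared by the framework; the group label value "example-group" is a placeholder.

// Sketch (not part of this change): create a group snapshot for PVCs labelled
// group=example-group in the test namespace, then delete the class again.
func exampleGroupSnapshotFlow(ctx context.Context, driver VolumeGroupSnapshottableTestDriver,
	config *PerTestConfig, pattern TestPattern, timeouts *framework.TimeoutContext) {
	resource := CreateVolumeGroupSnapshotResource(ctx, driver, config, pattern,
		"example-group", config.Framework.Namespace.Name, timeouts,
		map[string]string{"deletionPolicy": pattern.SnapshotDeletionPolicy.String()})
	// The resource carries the unstructured class and snapshot objects.
	framework.ExpectNoError(resource.CleanupResource(ctx, timeouts))
}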

View File

@@ -68,6 +68,7 @@ var BaseSuites = []func() storageframework.TestSuite{
InitTopologyTestSuite,
InitVolumeStressTestSuite,
InitFsGroupChangePolicyTestSuite,
InitVolumeGroupSnapshottableTestSuite,
func() storageframework.TestSuite {
return InitCustomEphemeralTestSuite(GenericEphemeralTestPatterns())
},
@@ -79,6 +80,7 @@ var CSISuites = append(BaseSuites,
return InitCustomEphemeralTestSuite(CSIEphemeralTestPatterns())
},
InitSnapshottableTestSuite,
InitVolumeGroupSnapshottableTestSuite,
InitSnapshottableStressTestSuite,
InitVolumePerformanceTestSuite,
InitReadWriteOncePodTestSuite,

View File

@@ -0,0 +1,221 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testsuites
import (
"context"
"github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
admissionapi "k8s.io/pod-security-admission/api"
)
type volumeGroupSnapshottableTest struct {
config *storageframework.PerTestConfig
pods []*v1.Pod
volumeGroup [3][]*storageframework.VolumeResource
snapshots []*storageframework.VolumeGroupSnapshotResource
numPods int
numVolumes int
}
type VolumeGroupSnapshottableTestSuite struct {
tsInfo storageframework.TestSuiteInfo
}
func InitVolumeGroupSnapshottableTestSuite() storageframework.TestSuite {
patterns := []storageframework.TestPattern{
storageframework.VolumeGroupSnapshotDelete,
}
return InitCustomGroupSnapshottableTestSuite(patterns)
}
func InitCustomGroupSnapshottableTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
return &VolumeGroupSnapshottableTestSuite{
tsInfo: storageframework.TestSuiteInfo{
Name: "volumegroupsnapshottable",
TestPatterns: patterns,
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Mi",
},
TestTags: []interface{}{feature.VolumeGroupSnapshotDataSource},
},
}
}
func (s *VolumeGroupSnapshottableTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
// Check preconditions.
dInfo := driver.GetDriverInfo()
ok := false
_, ok = driver.(storageframework.VolumeGroupSnapshottableTestDriver)
if !dInfo.Capabilities[storageframework.CapVolumeGroupSnapshot] || !ok {
e2eskipper.Skipf("Driver %q does not support group snapshots - skipping", dInfo.Name)
}
}
func (s *VolumeGroupSnapshottableTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
return s.tsInfo
}
func (s *VolumeGroupSnapshottableTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
labelKey := "group"
labelValue := "test-group"
f := framework.NewDefaultFramework("volumegroupsnapshottable")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
ginkgo.Describe("VolumeGroupSnapshottable", func() {
ginkgo.Context("", func() {
var (
snapshottableDriver storageframework.VolumeGroupSnapshottableTestDriver
cs clientset.Interface
groupTest *volumeGroupSnapshottableTest
)
init := func(ctx context.Context) {
snapshottableDriver = driver.(storageframework.VolumeGroupSnapshottableTestDriver)
cs = f.ClientSet
config := driver.PrepareTest(ctx, f)
groupTest = &volumeGroupSnapshottableTest{
config: config,
volumeGroup: [3][]*storageframework.VolumeResource{},
snapshots: []*storageframework.VolumeGroupSnapshotResource{},
pods: []*v1.Pod{},
numPods: 1,
numVolumes: 3,
}
}
createGroupLabel := func(ctx context.Context, pvc *v1.PersistentVolumeClaim, labelKey, labelValue string) {
if pvc.Labels == nil {
pvc.Labels = map[string]string{}
}
pvc.Labels[labelKey] = labelValue
_, err := cs.CoreV1().PersistentVolumeClaims(pvc.GetNamespace()).Update(ctx, pvc, metav1.UpdateOptions{})
framework.ExpectNoError(err, "failed to update PVC %s", pvc.Name)
}
createPodsAndVolumes := func(ctx context.Context) {
for i := 0; i < groupTest.numPods; i++ {
framework.Logf("Creating resources for pod %d/%d", i, groupTest.numPods-1)
for j := 0; j < groupTest.numVolumes; j++ {
volume := storageframework.CreateVolumeResource(ctx, driver, groupTest.config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
groupTest.volumeGroup[i] = append(groupTest.volumeGroup[i], volume)
createGroupLabel(ctx, volume.Pvc, labelKey, labelValue)
}
pvcs := []*v1.PersistentVolumeClaim{}
for _, volume := range groupTest.volumeGroup[i] {
pvcs = append(pvcs, volume.Pvc)
}
// Create a pod with multiple volumes
podConfig := e2epod.Config{
NS: f.Namespace.Name,
PVCs: pvcs,
SeLinuxLabel: e2epv.SELinuxLabel,
}
pod, err := e2epod.MakeSecPod(&podConfig)
framework.ExpectNoError(err, "failed to create pod")
groupTest.pods = append(groupTest.pods, pod)
}
for i, pod := range groupTest.pods {
pod, err := cs.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
framework.Failf("Failed to create pod-%d [%+v]. Error: %v", i, pod, err)
}
if err = e2epod.WaitForPodRunningInNamespace(ctx, cs, pod); err != nil {
framework.Failf("Failed to wait for pod-%d [%+v] to turn into running status. Error: %v", i, pod, err)
}
}
}
cleanup := func(ctx context.Context) {
for _, pod := range groupTest.pods {
framework.Logf("Deleting pod %s", pod.Name)
err := e2epod.DeletePodWithWait(ctx, cs, pod)
framework.ExpectNoError(err, "failed to delete pod %s", pod.Name)
}
for _, group := range groupTest.volumeGroup {
for _, volume := range group {
framework.Logf("Deleting volume %s", volume.Pvc.Name)
err := volume.CleanupResource(ctx)
framework.ExpectNoError(err, "failed to delete volume %s", volume.Pvc.Name)
}
}
}
ginkgo.It("should create snapshots for multiple volumes in a pod", func(ctx context.Context) {
init(ctx)
createPodsAndVolumes(ctx)
ginkgo.DeferCleanup(cleanup)
snapshot := storageframework.CreateVolumeGroupSnapshotResource(ctx, snapshottableDriver, groupTest.config, pattern, labelValue, groupTest.volumeGroup[0][0].Pvc.GetNamespace(), f.Timeouts, map[string]string{"deletionPolicy": pattern.SnapshotDeletionPolicy.String()})
groupTest.snapshots = append(groupTest.snapshots, snapshot)
ginkgo.By("verifying the snapshots in the group are ready to use")
status := snapshot.Vgs.Object["status"]
err := framework.Gomega().Expect(status).NotTo(gomega.BeNil())
framework.ExpectNoError(err, "failed to get status of group snapshot")
volumes := status.(map[string]interface{})["pvcVolumeSnapshotRefList"]
err = framework.Gomega().Expect(volumes).NotTo(gomega.BeNil())
framework.ExpectNoError(err, "failed to get volume snapshot list")
volumeList := volumes.([]interface{})
err = framework.Gomega().Expect(len(volumeList)).To(gomega.Equal(groupTest.numVolumes))
framework.ExpectNoError(err, "failed to get volume snapshot list")
claimSize := groupTest.volumeGroup[0][0].Pvc.Spec.Resources.Requests.Storage().String()
for _, volume := range volumeList {
// Create a PVC from the snapshot
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
StorageClassName: &groupTest.volumeGroup[0][0].Sc.Name,
ClaimSize: claimSize,
}, f.Namespace.Name)
group := "snapshot.storage.k8s.io"
pvc.Spec.DataSource = &v1.TypedLocalObjectReference{
APIGroup: &group,
Kind: "VolumeSnapshot",
Name: volume.(map[string]interface{})["volumeSnapshotRef"].(map[string]interface{})["name"].(string),
}
volSrc := v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: pvc.Spec,
},
},
}
pvc, err := cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(ctx, pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create PVC from snapshot")
pod := StartInPodWithVolumeSource(ctx, cs, volSrc, pvc.Namespace, "snapshot-pod", "sleep 300", groupTest.config.ClientNodeSelection)
ginkgo.DeferCleanup(e2epod.DeletePodWithWait, cs, pod)
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow), "Pod did not start in expected time")
}
})
})
})
}
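As a rough guide, this is what a driver has to provide for the suite above to run against it: the CapVolumeGroupSnapshot capability and a GetVolumeGroupSnapshotClass implementation. exampleDriver and its CSI driver name are hypothetical, and the sketch assumes the storageframework, utils and unstructured imports used elsewhere in this commit.

// Sketch (not part of this change): a hypothetical driver opting into the suite.
func (d *exampleDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &storageframework.DriverInfo{
		Name: "example.csi.vendor.io",
		Capabilities: map[storageframework.Capability]bool{
			// Without this capability SkipUnsupportedTests skips the whole suite.
			storageframework.CapVolumeGroupSnapshot: true,
		},
	}
}

func (d *exampleDriver) GetVolumeGroupSnapshotClass(ctx context.Context, config *storageframework.PerTestConfig, parameters map[string]string) *unstructured.Unstructured {
	// Build an unstructured VolumeGroupSnapshotClass for this driver in the test namespace.
	return utils.GenerateVolumeGroupSnapshotClassSpec("example.csi.vendor.io", parameters, config.Framework.Namespace.Name)
}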

View File

@@ -0,0 +1,102 @@
/*
Copyright 2024 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package utils
import (
"context"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// VolumeGroupSnapshotAPIGroup is the group snapshot API group
VolumeGroupSnapshotAPIGroup = "groupsnapshot.storage.k8s.io"
// VolumeGroupSnapshotAPIVersion is the group snapshot API version
VolumeGroupSnapshotAPIVersion = "groupsnapshot.storage.k8s.io/v1alpha1"
)
var (
// VolumeGroupSnapshotGVR is GroupVersionResource for volumegroupsnapshots
VolumeGroupSnapshotGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshots"}
// VolumeGroupSnapshotClassGVR is GroupVersionResource for volumegroupsnapshotclasses
VolumeGroupSnapshotClassGVR = schema.GroupVersionResource{Group: VolumeGroupSnapshotAPIGroup, Version: "v1alpha1", Resource: "volumegroupsnapshotclasses"}
)
// WaitForVolumeGroupSnapshotReady waits for a VolumeGroupSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForVolumeGroupSnapshotReady(ctx context.Context, c dynamic.Interface, ns string, volumeGroupSnapshotName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for VolumeGroupSnapshot %s to become ready", timeout, volumeGroupSnapshotName)
if successful := WaitUntil(poll, timeout, func() bool {
volumeGroupSnapshot, err := c.Resource(VolumeGroupSnapshotGVR).Namespace(ns).Get(ctx, volumeGroupSnapshotName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get group snapshot %q, retrying in %v. Error: %v", volumeGroupSnapshotName, poll, err)
return false
}
status := volumeGroupSnapshot.Object["status"]
if status == nil {
framework.Logf("VolumeGroupSnapshot %s found but is not ready.", volumeGroupSnapshotName)
return false
}
value := status.(map[string]interface{})
if value["readyToUse"] == true {
framework.Logf("VolumeSnapshot %s found and is ready", volumeGroupSnapshotName)
return true
}
framework.Logf("VolumeSnapshot %s found but is not ready.", volumeGroupSnapshotName)
return false
}); successful {
return nil
}
return fmt.Errorf("VolumeSnapshot %s is not ready within %v", volumeGroupSnapshotName, timeout)
}
// GenerateVolumeGroupSnapshotClassSpec constructs an unstructured VolumeGroupSnapshotClass for the given driver,
// defaulting the deletion policy to Delete when none is supplied in parameters.
func GenerateVolumeGroupSnapshotClassSpec(
snapshotter string,
parameters map[string]string,
ns string,
) *unstructured.Unstructured {
deletionPolicy, ok := parameters["deletionPolicy"]
if !ok {
deletionPolicy = "Delete"
}
volumeGroupSnapshotClass := &unstructured.Unstructured{
Object: map[string]interface{}{
"kind": "VolumeGroupSnapshotClass",
"apiVersion": VolumeGroupSnapshotAPIVersion,
"metadata": map[string]interface{}{
// Name must be unique, so base it on the namespace name with a generated random suffix
"name": names.SimpleNameGenerator.GenerateName(ns),
},
"driver": snapshotter,
"parameters": parameters,
"deletionPolicy": deletionPolicy,
},
}
return volumeGroupSnapshotClass
}
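A short usage sketch of the two helpers in this file, assuming the same package and imports; the driver name, namespace, and group snapshot name are placeholders.

// Sketch (not part of this change): create a class and wait for a group snapshot.
func exampleWaitForGroupSnapshot(ctx context.Context, dc dynamic.Interface, timeouts *framework.TimeoutContext) error {
	class := GenerateVolumeGroupSnapshotClassSpec("hostpath.csi.k8s.io", map[string]string{"deletionPolicy": "Delete"}, "default")
	if _, err := dc.Resource(VolumeGroupSnapshotClassGVR).Create(ctx, class, metav1.CreateOptions{}); err != nil {
		return err
	}
	// Poll status.readyToUse until it reports true or the snapshot-create timeout expires.
	return WaitForVolumeGroupSnapshotReady(ctx, dc, "default", "group-snapshot-xxxxx",
		framework.Poll, timeouts.SnapshotCreate)
}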

View File

@@ -308,7 +308,7 @@ main() {
# debug kind version
kind version
cp test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml || exit 1
# build kubernetes
build
# in CI attempt to release some memory after building
@@ -317,7 +317,7 @@
echo 1 > /proc/sys/vm/drop_caches || true
fi
cp test/e2e/testing-manifests/storage-csi/external-snapshotter/volume-group-snapshots/csi-hostpath-plugin.yaml test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml || exit 1
# create the cluster and run tests
res=0
create_cluster || res=$?