Volume snapshot e2e test to validate VolumeSnapshotContent and PVC finalizer
This commit is contained in:
parent ededd08ba1
commit e95af138b5
@@ -22,6 +22,6 @@ spec:
       serviceAccount: volume-snapshot-controller
       containers:
         - name: volume-snapshot-controller
-          image: k8s.gcr.io/sig-storage/snapshot-controller:v3.0.0
+          image: k8s.gcr.io/sig-storage/snapshot-controller:v3.0.2
           args:
             - "--v=5"
@@ -15,6 +15,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/skipper:go_default_library",
+        "//test/e2e/storage/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],
 )
@@ -19,6 +19,7 @@ package framework
 import (
 	"context"
 	"fmt"
+	"k8s.io/kubernetes/test/e2e/storage/utils"
 	"time"

 	"github.com/onsi/ginkgo"
@@ -128,6 +129,8 @@ type PersistentVolumeConfig struct {
 // PersistentVolumeClaimConfig is consumed by MakePersistentVolumeClaim() to
 // generate a PVC object.
 type PersistentVolumeClaimConfig struct {
+	// Name of the PVC. If set, overrides NamePrefix
+	Name string
 	// NamePrefix defaults to "pvc-" if unspecified
 	NamePrefix string
 	// ClaimSize must be specified in the Quantity format. Defaults to 2Gi if
@@ -621,6 +624,7 @@ func MakePersistentVolumeClaim(cfg PersistentVolumeClaimConfig, ns string) *v1.P

 	return &v1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
+			Name:         cfg.Name,
 			GenerateName: cfg.NamePrefix,
 			Namespace:    ns,
 			Annotations:  cfg.Annotations,
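With the new Name field, a PVC's name can be fixed in advance instead of relying on the generated "pvc-" prefix. A minimal usage sketch; the class name and namespace below are illustrative placeholders, not part of this commit:

	className := "mock-sc" // hypothetical StorageClass name
	claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
		// Fixed name, so other objects can reference the claim before it exists.
		Name:             "snapshot-test-pvc",
		StorageClassName: &className,
	}, "e2e-pvc-test") // hypothetical test namespace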
@@ -844,3 +848,31 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, poll,
 	}
 	return fmt.Errorf("PersistentVolume %s still exists within %v", pvName, timeout)
 }
+
+// WaitForPVCFinalizer waits for a finalizer to be added to a PVC in a given namespace.
+func WaitForPVCFinalizer(ctx context.Context, cs clientset.Interface, name, namespace, finalizer string, poll, timeout time.Duration) error {
+	var (
+		err error
+		pvc *v1.PersistentVolumeClaim
+	)
+	framework.Logf("Waiting up to %v for PersistentVolumeClaim %s/%s to contain finalizer %s", timeout, namespace, name, finalizer)
+	if successful := utils.WaitUntil(poll, timeout, func() bool {
+		pvc, err = cs.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			framework.Logf("Failed to get PersistentVolumeClaim %s/%s with err: %v. Will retry in %v", name, namespace, err, poll)
+			return false
+		}
+		for _, f := range pvc.Finalizers {
+			if f == finalizer {
+				return true
+			}
+		}
+		return false
+	}); successful {
+		return nil
+	}
+	if err == nil {
+		err = fmt.Errorf("finalizer %s not added to pvc %s/%s", finalizer, namespace, name)
+	}
+	return err
+}
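For reference, this is how the helper is invoked by the new test further down: poll every millisecond, give up after a minute, and fail the spec if the snapshot controller never adds its finalizer. A sketch, assuming a Ginkgo framework instance f and an existing claim object:

	err := e2epv.WaitForPVCFinalizer(ctx, f.ClientSet, claim.Name, claim.Namespace,
		"snapshot.storage.kubernetes.io/pvc-as-source-protection",
		1*time.Millisecond, 1*time.Minute)
	framework.ExpectNoError(err)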
@@ -51,6 +51,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
@@ -88,6 +89,7 @@ go_library(
         "//test/e2e/framework/testfiles:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
         "//test/e2e/storage/drivers:go_default_library",
+        "//test/e2e/storage/testpatterns:go_default_library",
         "//test/e2e/storage/testsuites:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/utils/image:go_default_library",
@@ -33,6 +33,7 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/fields"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -47,7 +48,9 @@ import (
 	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/storage/drivers"
+	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -106,6 +109,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		enableNodeExpansion bool // enable node expansion for CSI mock driver
 		// just disable resizing on the driver; it overrides the enableResizing flag for the CSI mock driver
 		disableResizingOnDriver bool
+		enableSnapshot          bool
 		javascriptHooks         map[string]string
 	}

@@ -116,6 +120,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		pods        []*v1.Pod
 		pvcs        []*v1.PersistentVolumeClaim
 		sc          map[string]*storagev1.StorageClass
+		vsc         map[string]*unstructured.Unstructured
 		driver      testsuites.TestDriver
 		provisioner string
 		tp          testParameters
@@ -127,9 +132,10 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

 	init := func(tp testParameters) {
 		m = mockDriverSetup{
-			cs: f.ClientSet,
-			sc: make(map[string]*storagev1.StorageClass),
-			tp: tp,
+			cs:  f.ClientSet,
+			sc:  make(map[string]*storagev1.StorageClass),
+			vsc: make(map[string]*unstructured.Unstructured),
+			tp:  tp,
 		}
 		cs := f.ClientSet
 		var err error
@@ -142,6 +148,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			DisableAttach:       tp.disableAttach,
 			EnableResizing:      tp.enableResizing,
 			EnableNodeExpansion: tp.enableNodeExpansion,
+			EnableSnapshot:      tp.enableSnapshot,
 			JavascriptHooks:     tp.javascriptHooks,
 		}

@@ -244,6 +251,10 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			cs.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
 		}

+		for _, vsc := range m.vsc {
+			ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
+			m.config.Framework.DynamicClient.Resource(testsuites.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
+		}
 		ginkgo.By("Cleaning up resources")
 		for _, cleanupFunc := range m.testCleanups {
 			cleanupFunc()
@@ -1165,6 +1176,131 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			})
 		}
 	})
+	ginkgo.Context("CSI Volume Snapshots [Feature:VolumeSnapshotDataSource]", func() {
+		// Global variable in all scripts (called before each test)
+		globalScript := `counter=0; console.log("globals loaded", OK, DEADLINEEXCEEDED)`
+		tests := []struct {
+			name                 string
+			createVolumeScript   string
+			createSnapshotScript string
+		}{
+			{
+				name:                 "volumesnapshotcontent and pvc in Bound state with deletion timestamp set should not get deleted while snapshot finalizer exists",
+				createVolumeScript:   `OK`,
+				createSnapshotScript: `console.log("Counter:", ++counter); if (counter < 8) { DEADLINEEXCEEDED; } else { OK; }`,
+			},
+		}
+		for _, test := range tests {
+			ginkgo.It(test.name, func() {
+				scripts := map[string]string{
+					"globals":             globalScript,
+					"createVolumeStart":   test.createVolumeScript,
+					"createSnapshotStart": test.createSnapshotScript,
+				}
+				init(testParameters{
+					disableAttach:   true,
+					registerDriver:  true,
+					enableSnapshot:  true,
+					javascriptHooks: scripts,
+				})
+				sDriver, ok := m.driver.(testsuites.SnapshottableTestDriver)
+				if !ok {
+					e2eskipper.Skipf("mock driver %s does not support snapshots -- skipping", m.driver.GetDriverInfo().Name)
+				}
+				ctx, cancel := context.WithTimeout(context.Background(), csiPodRunningTimeout)
+				defer cancel()
+				defer cleanup()
+
+				var sc *storagev1.StorageClass
+				if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
+					sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
+				}
+				ginkgo.By("Creating storage class")
+				class, err := m.cs.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
+				framework.ExpectNoError(err, "Failed to create class: %v", err)
+				m.sc[class.Name] = class
+				claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
+					// Use static name so that the volumesnapshot can be created before the pvc.
+					Name:             "snapshot-test-pvc",
+					StorageClassName: &(class.Name),
+				}, f.Namespace.Name)
+
+				ginkgo.By("Creating snapshot")
+				// TODO: Test VolumeSnapshots with Retain policy
+				snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace)
+				framework.ExpectNoError(err, "failed to create snapshot")
+				m.vsc[snapshotClass.GetName()] = snapshotClass
+				volumeSnapshotName := snapshot.GetName()
+
+				ginkgo.By(fmt.Sprintf("Creating PVC %s/%s", claim.Namespace, claim.Name))
+				claim, err = m.cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), claim, metav1.CreateOptions{})
+				framework.ExpectNoError(err, "Failed to create claim: %v", err)
+
+				ginkgo.By(fmt.Sprintf("Wait for finalizer to be added to claim %s/%s", claim.Namespace, claim.Name))
+				err = e2epv.WaitForPVCFinalizer(ctx, m.cs, claim.Name, claim.Namespace, pvcAsSourceProtectionFinalizer, 1*time.Millisecond, 1*time.Minute)
+				framework.ExpectNoError(err)
+
+				ginkgo.By("Wait for PVC to be Bound")
+				_, err = e2epv.WaitForPVClaimBoundPhase(m.cs, []*v1.PersistentVolumeClaim{claim}, 1*time.Minute)
+				framework.ExpectNoError(err, "Failed to wait for claim to be bound: %v", err)
+
+				ginkgo.By(fmt.Sprintf("Delete PVC %s", claim.Name))
+				err = e2epv.DeletePersistentVolumeClaim(m.cs, claim.Name, claim.Namespace)
+				framework.ExpectNoError(err, "failed to delete pvc")
+
+				ginkgo.By("Get PVC from API server and verify deletion timestamp is set")
+				claim, err = m.cs.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(context.TODO(), claim.Name, metav1.GetOptions{})
+				if err != nil {
+					if !apierrors.IsNotFound(err) {
+						framework.ExpectNoError(err, "Failed to get claim: %v", err)
+					}
+					framework.Logf("PVC not found. Continuing to test VolumeSnapshotContent finalizer")
+				}
+				if claim != nil && claim.DeletionTimestamp == nil {
+					framework.Failf("Expected deletion timestamp to be set on PVC %s", claim.Name)
+				}
+
+				ginkgo.By(fmt.Sprintf("Get VolumeSnapshotContent bound to VolumeSnapshot %s", snapshot.GetName()))
+				snapshotContent := testsuites.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot)
+				volumeSnapshotContentName := snapshotContent.GetName()
+
+				ginkgo.By(fmt.Sprintf("Verify VolumeSnapshotContent %s contains finalizer %s", volumeSnapshotContentName, volumeSnapshotContentFinalizer))
+				err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, testsuites.SnapshotContentGVR, volumeSnapshotContentName, "", volumeSnapshotContentFinalizer, 1*time.Millisecond, 1*time.Minute)
+				framework.ExpectNoError(err)
+
+				ginkgo.By(fmt.Sprintf("Delete VolumeSnapshotContent %s", snapshotContent.GetName()))
+				err = m.config.Framework.DynamicClient.Resource(testsuites.SnapshotContentGVR).Delete(ctx, snapshotContent.GetName(), metav1.DeleteOptions{})
+				framework.ExpectNoError(err, "Failed to delete snapshotcontent: %v", err)
+
+				ginkgo.By("Get VolumeSnapshotContent from API server and verify deletion timestamp is set")
+				snapshotContent, err = m.config.Framework.DynamicClient.Resource(testsuites.SnapshotContentGVR).Get(context.TODO(), snapshotContent.GetName(), metav1.GetOptions{})
+				framework.ExpectNoError(err)
+
+				if snapshotContent.GetDeletionTimestamp() == nil {
+					framework.Failf("Expected deletion timestamp to be set on snapshotcontent")
+				}
+
+				if claim != nil {
+					ginkgo.By(fmt.Sprintf("Wait for PV %s to be deleted", claim.Spec.VolumeName))
+					err = e2epv.WaitForPersistentVolumeDeleted(m.cs, claim.Spec.VolumeName, framework.Poll, 3*time.Minute)
+					framework.ExpectNoError(err, fmt.Sprintf("failed to delete PV %s", claim.Spec.VolumeName))
+				}
+
+				ginkgo.By(fmt.Sprintf("Verify VolumeSnapshot %s contains finalizer %s", snapshot.GetName(), volumeSnapshotBoundFinalizer))
+				err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, testsuites.SnapshotGVR, volumeSnapshotName, f.Namespace.Name, volumeSnapshotBoundFinalizer, 1*time.Millisecond, 1*time.Minute)
+				framework.ExpectNoError(err)
+
+				ginkgo.By("Delete VolumeSnapshot")
+				err = testsuites.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout)
+				framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshot %s", volumeSnapshotName))
+
+				ginkgo.By(fmt.Sprintf("Wait for VolumeSnapshotContent %s to be deleted", volumeSnapshotContentName))
+				err = utils.WaitForGVRDeletion(m.config.Framework.DynamicClient, testsuites.SnapshotContentGVR, volumeSnapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout)
+				framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshotContent %s", volumeSnapshotContentName))
+			})
+		}
+	})
 })

 // A lot of this code was copied from e2e/framework. It would be nicer
@@ -1186,8 +1322,11 @@ func podRunning(ctx context.Context, c clientset.Interface, podName, namespace s
 }

 const (
-	podStartTimeout = 5 * time.Minute
-	poll            = 2 * time.Second
+	podStartTimeout                = 5 * time.Minute
+	poll                           = 2 * time.Second
+	pvcAsSourceProtectionFinalizer = "snapshot.storage.kubernetes.io/pvc-as-source-protection"
+	volumeSnapshotContentFinalizer = "snapshot.storage.kubernetes.io/volumesnapshotcontent-bound-protection"
+	volumeSnapshotBoundFinalizer   = "snapshot.storage.kubernetes.io/volumesnapshot-bound-protection"
 )

 var (
@@ -257,11 +257,13 @@ type CSIMockDriverOpts struct {
 	EnableTopology      bool
 	EnableResizing      bool
 	EnableNodeExpansion bool
+	EnableSnapshot      bool
 	JavascriptHooks     map[string]string
 }

 var _ testsuites.TestDriver = &mockCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
+var _ testsuites.SnapshottableTestDriver = &mockCSIDriver{}

 // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
 func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
@@ -269,6 +271,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml",
+		"test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/mock/csi-mock-rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/mock/csi-storageclass.yaml",
 		"test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml",
@@ -286,6 +289,10 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
 		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml")
 	}

+	if driverOpts.EnableSnapshot {
+		driverManifests = append(driverManifests, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml")
+	}
+
 	return &mockCSIDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: "csi-mock",
@@ -328,6 +335,15 @@ func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTe
 	return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
 }

+func (m *mockCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
+	parameters := map[string]string{}
+	snapshotter := m.driverInfo.Name + "-" + config.Framework.UniqueName
+	ns := config.Framework.Namespace.Name
+	suffix := fmt.Sprintf("%s-vsc", snapshotter)
+
+	return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
+}
+
 func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
 	// Create secondary namespace which will be used for creating driver
 	driverNamespace := utils.CreateDriverNamespace(f)
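The returned snapshot class is an unstructured object rather than a typed one. Roughly, it carries the shape sketched below; the group/version is my assumption based on the v1beta1 snapshot API in use at the time, since the diff does not show testsuites.GetSnapshotClass itself:

	// Approximate shape of the object GetSnapshotClass builds (assumption).
	sclass := &unstructured.Unstructured{
		Object: map[string]interface{}{
			"kind":       "VolumeSnapshotClass",
			"apiVersion": "snapshot.storage.k8s.io/v1beta1", // assumed group/version
			"metadata": map[string]interface{}{
				"name": suffix, // e.g. "csi-mock-<unique>-vsc"
			},
			"driver": snapshotter,
		},
	}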
@@ -355,54 +355,77 @@ type SnapshotResource struct {
 	Vsclass *unstructured.Unstructured
 }

-// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
-// different test pattern snapshot provisioning and deletion policy
-func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) *SnapshotResource {
+// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot
+// from the VolumeSnapshotClass using a dynamic client.
+// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects.
+func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) (*unstructured.Unstructured, *unstructured.Unstructured) {
+	defer ginkgo.GinkgoRecover()
 	var err error
 	if pattern.SnapshotType != testpatterns.DynamicCreatedSnapshot && pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot {
 		err = fmt.Errorf("SnapshotType must be set to either DynamicCreatedSnapshot or PreprovisionedCreatedSnapshot")
 		framework.ExpectNoError(err)
 	}
-	r := SnapshotResource{
-		Config:  config,
-		Pattern: pattern,
-	}
-	dc := r.Config.Framework.DynamicClient
+	dc := config.Framework.DynamicClient

 	ginkgo.By("creating a SnapshotClass")
-	r.Vsclass = sDriver.GetSnapshotClass(config)
-	if r.Vsclass == nil {
+	sclass := sDriver.GetSnapshotClass(config)
+	if sclass == nil {
 		framework.Failf("Failed to get snapshot class based on test config")
 	}
-	r.Vsclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String()
+	sclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String()

-	r.Vsclass, err = dc.Resource(SnapshotClassGVR).Create(context.TODO(), r.Vsclass, metav1.CreateOptions{})
+	sclass, err = dc.Resource(SnapshotClassGVR).Create(context.TODO(), sclass, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

-	r.Vsclass, err = dc.Resource(SnapshotClassGVR).Get(context.TODO(), r.Vsclass.GetName(), metav1.GetOptions{})
+	sclass, err = dc.Resource(SnapshotClassGVR).Get(context.TODO(), sclass.GetName(), metav1.GetOptions{})
 	framework.ExpectNoError(err)

 	ginkgo.By("creating a dynamic VolumeSnapshot")
 	// prepare a dynamically provisioned volume snapshot with certain data
-	r.Vs = getSnapshot(pvcName, pvcNamespace, r.Vsclass.GetName())
+	snapshot := getSnapshot(pvcName, pvcNamespace, sclass.GetName())

-	r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{})
+	snapshot, err = dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Create(context.TODO(), snapshot, metav1.CreateOptions{})
 	framework.ExpectNoError(err)

-	err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
+	return sclass, snapshot
+}
+
+// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
+// given VolumeSnapshot
+func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
+	defer ginkgo.GinkgoRecover()
+	err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
 	framework.ExpectNoError(err)

-	r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Get(context.TODO(), r.Vs.GetName(), metav1.GetOptions{})
+	vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
+	framework.ExpectNoError(err)

-	snapshotStatus := r.Vs.Object["status"].(map[string]interface{})
+	snapshotStatus := vs.Object["status"].(map[string]interface{})
 	snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
 	framework.Logf("received snapshotStatus %v", snapshotStatus)
 	framework.Logf("snapshotContentName %s", snapshotContentName)
-	framework.ExpectNoError(err)

-	r.Vscontent, err = dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
+	vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
 	framework.ExpectNoError(err)
+
+	return vscontent
+}
+
+// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
+// different test pattern snapshot provisioning and deletion policy
+func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) *SnapshotResource {
+	var err error
+	r := SnapshotResource{
+		Config:  config,
+		Pattern: pattern,
+	}
+	r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace)
+
+	dc := r.Config.Framework.DynamicClient
+
+	r.Vscontent = GetSnapshotContentFromSnapshot(dc, r.Vs)
+
 	if pattern.SnapshotType == testpatterns.PreprovisionedCreatedSnapshot {
 		// prepare a pre-provisioned VolumeSnapshotContent with certain data
 		// Because this could be run with an external CSI driver, we have no way
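Splitting the old monolith into CreateSnapshot and GetSnapshotContentFromSnapshot lets a caller create the VolumeSnapshot early and resolve its bound content only once it is needed. A sketch of the new call sequence, mirroring the mock-driver test above:

	snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config,
		testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace)
	// ... create the PVC and let provisioning and snapshotting catch up ...
	snapshotContent := testsuites.GetSnapshotContentFromSnapshot(
		m.config.Framework.DynamicClient, snapshot)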
@@ -21,6 +21,7 @@ go_library(
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -33,6 +33,7 @@ import (
 	rbacv1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -806,3 +807,37 @@ func WaitUntil(poll, timeout time.Duration, checkDone func() bool) bool {
 	framework.Logf("WaitUntil failed after reaching the timeout %v", timeout)
 	return false
 }
+
+// WaitForGVRFinalizer waits until an object from a given GVR contains a finalizer.
+// If namespace is empty, assume it is a non-namespaced object.
+func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.GroupVersionResource, objectName, objectNamespace, finalizer string, poll, timeout time.Duration) error {
+	framework.Logf("Waiting up to %v for object %s %s of resource %s to contain finalizer %s", timeout, objectNamespace, objectName, gvr.Resource, finalizer)
+	var (
+		err      error
+		resource *unstructured.Unstructured
+	)
+	if successful := WaitUntil(poll, timeout, func() bool {
+		switch objectNamespace {
+		case "":
+			resource, err = c.Resource(gvr).Get(ctx, objectName, metav1.GetOptions{})
+		default:
+			resource, err = c.Resource(gvr).Namespace(objectNamespace).Get(ctx, objectName, metav1.GetOptions{})
+		}
+		if err != nil {
+			framework.Logf("Failed to get object %s %s with err: %v. Will retry in %v", objectNamespace, objectName, err, poll)
+			return false
+		}
+		for _, f := range resource.GetFinalizers() {
+			if f == finalizer {
+				return true
+			}
+		}
+		return false
+	}); successful {
+		return nil
+	}
+	if err == nil {
+		err = fmt.Errorf("finalizer %s not added to object %s %s of resource %s", finalizer, objectNamespace, objectName, gvr)
+	}
+	return err
+}
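Because the helper takes a GroupVersionResource, one function covers both the cluster-scoped VolumeSnapshotContent and the namespaced VolumeSnapshot. A sketch based on the two calls in the test above (dc stands in for the test's dynamic client):

	// Cluster-scoped object: pass "" for the namespace.
	err = utils.WaitForGVRFinalizer(ctx, dc, testsuites.SnapshotContentGVR,
		volumeSnapshotContentName, "", volumeSnapshotContentFinalizer,
		1*time.Millisecond, 1*time.Minute)
	framework.ExpectNoError(err)

	// Namespaced object: pass the VolumeSnapshot's namespace.
	err = utils.WaitForGVRFinalizer(ctx, dc, testsuites.SnapshotGVR,
		volumeSnapshotName, namespace, volumeSnapshotBoundFinalizer,
		1*time.Millisecond, 1*time.Minute)
	framework.ExpectNoError(err)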
@@ -0,0 +1,36 @@
+kind: StatefulSet
+apiVersion: apps/v1
+metadata:
+  name: csi-mockplugin-snapshotter
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: csi-mockplugin-snapshotter
+  template:
+    metadata:
+      labels:
+        app: csi-mockplugin-snapshotter
+    spec:
+      serviceAccountName: csi-mock
+      containers:
+        - name: csi-snapshotter
+          image: k8s.gcr.io/sig-storage/csi-snapshotter:v3.0.2
+          args:
+            - "--v=5"
+            - "--csi-address=$(ADDRESS)"
+            - "--leader-election=false"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          securityContext:
+            privileged: true
+          imagePullPolicy: Always
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+      volumes:
+        - name: socket-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/csi-mock
+            type: DirectoryOrCreate
@@ -72,3 +72,16 @@ roleRef:
   kind: ClusterRole
   name: external-resizer-runner
   apiGroup: rbac.authorization.k8s.io
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: csi-controller-snapshotter-role
+subjects:
+  - kind: ServiceAccount
+    name: csi-mock
+    namespace: default
+roleRef:
+  kind: ClusterRole
+  name: external-snapshotter-runner
+  apiGroup: rbac.authorization.k8s.io