Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-11 21:12:07 +00:00
Add ROX storage tests (#114628)
* Add ROX tests
* Add capability and switch to secpod
* add missing file
* remove extra param
* run formatter
* add anti-capability for resize w/ Restore + ROX
* remove size checking in ROX tests
* fix spacing
* fix spacing
This commit is contained in:
parent afeb78fd8f
commit 217196c59f
@@ -185,7 +185,8 @@ const (
 	// capacity information for it.
 	CapCapacity Capability = "capacity"
 
-	// Anti-capability for drivers that do not support filesystem resizing of PVCs that are cloned or restored from a snapshot.
+	// Anti-capability for drivers that do not support filesystem resizing of PVCs
+	// that are cloned or restored from a snapshot.
 	CapFSResizeFromSourceNotSupported Capability = "FSResizeFromSourceNotSupported"
 
 	// To support ReadWriteOncePod, the following CSI sidecars must be
@@ -204,6 +205,9 @@ const (
 	// talk to Kubernetes API server in any way should keep this capability enabled, because
 	// they will see the same NodeStage / NodePublish requests as if only one PV existed.
 	CapMultiplePVsSameID Capability = "multiplePVsSameID"
+
+	// The driver supports ReadOnlyMany (ROX) access mode
+	CapReadOnlyMany Capability = "capReadOnlyMany"
 )
 
 // DriverInfo represents static information about a TestDriver.
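For context, a driver opts in to the new ROX test cases through its DriverInfo capability map. Below is a minimal sketch, not part of this commit; the driver name and the exact set of other capabilities are assumptions for illustration only.

package example

import (
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// exampleDriverInfo shows how a test driver could advertise the new
// capability alongside the snapshot and clone capabilities the ROX tests
// also check for.
var exampleDriverInfo = storageframework.DriverInfo{
	Name: "example-csi-driver", // hypothetical driver name
	Capabilities: map[storageframework.Capability]bool{
		storageframework.CapSnapshotDataSource: true, // needed by the ROX snapshot test
		storageframework.CapPVCDataSource:      true, // needed by the ROX clone test
		storageframework.CapReadOnlyMany:       true, // opt in to the new ROX cases
	},
}

Drivers that leave CapReadOnlyMany unset keep their existing behavior, since both new specs skip when the capability is absent.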
@@ -19,12 +19,13 @@ package testsuites
 import (
 	"context"
 	"fmt"
-	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	"strconv"
 	"strings"
 	"sync"
 	"time"
 
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
+
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 
@@ -234,6 +235,48 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
 		l.testCase.TestDynamicProvisioning(ctx)
 	})
 
+	ginkgo.It("should provision storage with snapshot data source (ROX mode) [Feature:VolumeSnapshotDataSource]", func(ctx context.Context) {
+		if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
+			e2eskipper.Skipf("Driver %q does not support populating data from snapshot - skipping", dInfo.Name)
+		}
+		if !dInfo.SupportedFsType.Has(pattern.FsType) {
+			e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
+		}
+		if !dInfo.Capabilities[storageframework.CapReadOnlyMany] {
+			e2eskipper.Skipf("Driver %q does not support ROX access mode - skipping", dInfo.Name)
+		}
+
+		sDriver, ok := driver.(storageframework.SnapshottableTestDriver)
+		if !ok {
+			framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
+		}
+
+		init(ctx)
+
+		dc := l.config.Framework.DynamicClient
+		testConfig := storageframework.ConvertTestConfig(l.config)
+		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
+		dataSourceRef := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
+
+		l.pvc.Spec.DataSourceRef = dataSourceRef
+		l.pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{
+			v1.PersistentVolumeAccessMode(v1.ReadOnlyMany),
+		}
+		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
+			ginkgo.By("checking whether the created volume has the pre-populated data")
+			tests := []e2evolume.Test{
+				{
+					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
+					Mode:            pattern.VolMode,
+					File:            "index.html",
+					ExpectedContent: expectedContent,
+				},
+			}
+			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
+		}
+		l.testCase.TestDynamicProvisioning(ctx)
+	})
+
 	ginkgo.It("should provision storage with any volume data source [Serial]", func(ctx context.Context) {
 		if len(dInfo.InTreePluginName) != 0 {
 			e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping")
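The hunk above provisions a claim that is restored from a snapshot but requested ReadOnlyMany. A minimal standalone sketch of that claim shape follows; it is illustrative only (the helper name and GenerateName prefix are assumptions), and it assumes a Kubernetes API version where spec.dataSourceRef is *v1.TypedObjectReference (v1.26+).

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// roxClaimFromSnapshot builds a PVC restored from a VolumeSnapshot but
// requested with ReadOnlyMany, mirroring what the test sets on l.pvc.
func roxClaimFromSnapshot(snapshotName, storageClassName string) *v1.PersistentVolumeClaim {
	snapshotAPIGroup := "snapshot.storage.k8s.io"
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "rox-restore-"}, // hypothetical name prefix
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &storageClassName,
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			DataSourceRef: &v1.TypedObjectReference{
				APIGroup: &snapshotAPIGroup,
				Kind:     "VolumeSnapshot",
				Name:     snapshotName,
			},
		},
	}
}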
@@ -527,6 +570,48 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
 		l.testCase.TestDynamicProvisioning(ctx)
 	})
 
+	ginkgo.It("should provision storage with pvc data source (ROX mode)", func(ctx context.Context) {
+		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
+			e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
+		}
+		if !dInfo.Capabilities[storageframework.CapReadOnlyMany] {
+			e2eskipper.Skipf("Driver %q does not support ROX access mode - skipping", dInfo.Name)
+		}
+		init(ctx)
+
+		if l.config.ClientNodeSelection.Name == "" {
+			// Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
+			// drivers don't support cloning across them.
+			if err := ensureTopologyRequirements(ctx, &l.config.ClientNodeSelection, l.cs, dInfo, 1); err != nil {
+				framework.Failf("Error setting topology requirements: %v", err)
+			}
+		}
+		testConfig := storageframework.ConvertTestConfig(l.config)
+		expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
+		dataSourceRef := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
+		l.pvc.Spec.DataSourceRef = dataSourceRef
+		l.pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{
+			v1.PersistentVolumeAccessMode(v1.ReadOnlyMany),
+		}
+		l.testCase.NodeSelection = testConfig.ClientNodeSelection
+		l.testCase.PvCheck = func(ctx context.Context, claim *v1.PersistentVolumeClaim) {
+			ginkgo.By("checking whether the created volume has the pre-populated data")
+			tests := []e2evolume.Test{
+				{
+					Volume:          *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
+					Mode:            pattern.VolMode,
+					File:            "index.html",
+					ExpectedContent: expectedContent,
+				},
+			}
+			e2evolume.TestVolumeClientSlow(ctx, f, testConfig, nil, "", tests)
+		}
+		// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
+		volumeAttachment := e2evolume.GetVolumeAttachmentName(ctx, f.ClientSet, testConfig, l.testCase.Provisioner, dataSourceRef.Name, l.sourcePVC.Namespace)
+		e2evolume.WaitForVolumeAttachmentTerminated(ctx, volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
+		l.testCase.TestDynamicProvisioning(ctx)
+	})
+
 	ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func(ctx context.Context) {
 		// Test cloning a single volume multiple times.
 		if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
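The clone variant above builds its claim the same way, except the data source is the source PVC itself, referenced in the core API group. A minimal sketch under the same assumptions as the earlier example (helper name and claim metadata are illustrative):

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// roxClaimFromPVC builds a clone of an existing claim that is requested
// ReadOnlyMany, mirroring what the pvc-data-source (ROX mode) test sets on l.pvc.
func roxClaimFromPVC(sourceClaimName, storageClassName string) *v1.PersistentVolumeClaim {
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "rox-clone-"}, // hypothetical name prefix
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &storageClassName,
			AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany},
			// Cloning references the source claim in the core API group (APIGroup left nil).
			DataSourceRef: &v1.TypedObjectReference{
				Kind: "PersistentVolumeClaim",
				Name: sourceClaimName,
			},
		},
	}
}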