Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 09:52:49 +00:00)
Extract testsuite api to a separate package
Extract TestSuite, TestDriver, TestPattern, TestConfig, VolumeResource, and SnapshotResource from the testsuites package and move them into a new package called api. The ultimate goal is to make the testsuites package as clean as possible, so that it contains only the test suites themselves.
This commit is contained in:
parent 5dc491758c
commit 988563f8f5
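For context on the hunks below, here is a minimal sketch of how a caller is expected to consume the extracted package after this change. The storageapi alias and the DynamicPVTestDriver assertion mirror the csi_mock_volume.go updates at the end of this diff; the example package name and the exampleStorageClassFor helper are hypothetical.

// Illustrative sketch, not part of this commit: consuming the new storage api package.
package example

import (
    storagev1 "k8s.io/api/storage/v1"

    storageapi "k8s.io/kubernetes/test/e2e/storage/api"
)

// exampleStorageClassFor (hypothetical) shows the call pattern the updated call
// sites use: assert the narrower driver interface, then ask it for a StorageClass.
func exampleStorageClassFor(driver storageapi.TestDriver, config *storageapi.PerTestConfig) *storagev1.StorageClass {
    if dDriver, ok := driver.(storageapi.DynamicPVTestDriver); ok {
        return dDriver.GetDynamicProvisionStorageClass(config, "" /* default fsType */)
    }
    return nil
}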
@@ -12,12 +12,13 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/util/exec:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/onsi/gomega:go_default_library",
        "//vendor/k8s.io/utils/exec:go_default_library",
    ],
)
@@ -52,10 +52,11 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    clientset "k8s.io/client-go/kubernetes"
    clientexec "k8s.io/client-go/util/exec"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    "k8s.io/kubernetes/test/e2e/storage/utils"
    imageutils "k8s.io/kubernetes/test/utils/image"
    uexec "k8s.io/utils/exec"

    "github.com/onsi/ginkgo"
    "github.com/onsi/gomega"
@@ -462,7 +463,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy
            framework.ExpectNoError(err, "failed: finding the contents of the block device %s.", deviceName)

            // Check that it's a real block device
            utils.CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
            CheckVolumeModeOfPath(f, pod, test.Mode, deviceName)
        } else {
            // Filesystem: check content
            fileName := fmt.Sprintf("/opt/%d/%s", i, test.File)
@@ -472,7 +473,7 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, fsGroup *int64, fsTy

            // Check that a directory has been mounted
            dirName := filepath.Dir(fileName)
            utils.CheckVolumeModeOfPath(f, pod, test.Mode, dirName)
            CheckVolumeModeOfPath(f, pod, test.Mode, dirName)

            if !framework.NodeOSDistroIs("windows") {
                // Filesystem: check fsgroup
@@ -698,3 +699,71 @@ func GetLinuxLabel() *v1.SELinuxOptions {
    return &v1.SELinuxOptions{
        Level: "s0:c0,c1"}
}

// CheckVolumeModeOfPath check mode of volume
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
    if volMode == v1.PersistentVolumeBlock {
        // Check if block exists
        VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))

        // Double check that it's not directory
        VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
    } else {
        // Check if directory exists
        VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))

        // Double check that it's not block
        VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
    }
}

// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
    if framework.NodeOSDistroIs("windows") {
        return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
    }
    return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
}

// VerifyExecInPodSucceed verifies shell cmd in target pod succeed
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
    stdout, stderr, err := PodExec(f, pod, shExec)
    if err != nil {
        if exiterr, ok := err.(uexec.CodeExitError); ok {
            exitCode := exiterr.ExitStatus()
            framework.ExpectNoError(err,
                "%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
                shExec, exitCode, exiterr, stdout, stderr)
        } else {
            framework.ExpectNoError(err,
                "%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
                shExec, err, stdout, stderr)
        }
    }
}

// VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code
// TODO: put this under e2epod once https://github.com/kubernetes/kubernetes/issues/81245
// is resolved. Otherwise there will be dependency issue.
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
    stdout, stderr, err := PodExec(f, pod, shExec)
    if err != nil {
        if exiterr, ok := err.(clientexec.ExitError); ok {
            actualExitCode := exiterr.ExitStatus()
            framework.ExpectEqual(actualExitCode, exitCode,
                "%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
                shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
        } else {
            framework.ExpectNoError(err,
                "%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
                shExec, exitCode, err, stdout, stderr)
        }
    }
    framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}
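A brief usage sketch of the helpers added above (illustrative, not part of the diff). It assumes f and pod come from the surrounding e2e test, reuses only functions defined in this hunk, and treats /mnt/volume1 as a made-up mount path.

// Illustrative sketch: exercising the exec helpers from a test in the same package.
func verifyBlockDeviceMounted(f *framework.Framework, pod *v1.Pod) {
    // A block-mode volume should appear as a device node, never as a directory.
    CheckVolumeModeOfPath(f, pod, v1.PersistentVolumeBlock, "/mnt/volume1")

    // Arbitrary shell probes follow the same pattern: expect success, or a specific exit code.
    VerifyExecInPodSucceed(f, pod, "test -b /mnt/volume1")
    VerifyExecInPodFail(f, pod, "test -d /mnt/volume1", 1)
}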
@@ -88,8 +88,8 @@ go_library(
        "//test/e2e/framework/statefulset:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/framework/volume:go_default_library",
        "//test/e2e/storage/api:go_default_library",
        "//test/e2e/storage/drivers:go_default_library",
        "//test/e2e/storage/testpatterns:go_default_library",
        "//test/e2e/storage/testsuites:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
        "//test/utils/image:go_default_library",
@@ -115,10 +115,10 @@ filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//test/e2e/storage/api:all-srcs",
        "//test/e2e/storage/drivers:all-srcs",
        "//test/e2e/storage/external:all-srcs",
        "//test/e2e/storage/podlogs:all-srcs",
        "//test/e2e/storage/testpatterns:all-srcs",
        "//test/e2e/storage/testsuites:all-srcs",
        "//test/e2e/storage/utils:all-srcs",
        "//test/e2e/storage/vsphere:all-srcs",
test/e2e/storage/api/BUILD (new file, 52 lines)
@@ -0,0 +1,52 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "driveroperations.go",
        "snapshotresource.go",
        "testconfig.go",
        "testdriver.go",
        "testpattern.go",
        "testsuite.go",
        "volumeresource.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/storage/api",
    visibility = ["//visibility:public"],
    deps = [
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
        "//staging/src/k8s.io/client-go/dynamic:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/framework/pod:go_default_library",
        "//test/e2e/framework/pv:go_default_library",
        "//test/e2e/framework/skipper:go_default_library",
        "//test/e2e/framework/volume:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",
        "//vendor/github.com/pkg/errors:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites
package api

import (
    "fmt"
@@ -24,7 +24,7 @@ import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apiserver/pkg/storage/names"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
    "k8s.io/kubernetes/test/e2e/storage/utils"
)

// GetDriverNameWithFeatureTags returns driver name with feature tags
@@ -38,15 +38,13 @@ func GetDriverNameWithFeatureTags(driver TestDriver) string {
}

// CreateVolume creates volume for test unless dynamicPV or CSI ephemeral inline volume test
func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns.TestVolType) TestVolume {
func CreateVolume(driver TestDriver, config *PerTestConfig, volType TestVolType) TestVolume {
    switch volType {
    case testpatterns.InlineVolume, testpatterns.PreprovisionedPV:
    case InlineVolume, PreprovisionedPV:
        if pDriver, ok := driver.(PreprovisionedVolumeTestDriver); ok {
            return pDriver.CreateVolume(config, volType)
        }
    case testpatterns.CSIInlineVolume,
        testpatterns.GenericEphemeralVolume,
        testpatterns.DynamicPV:
    case CSIInlineVolume, GenericEphemeralVolume, DynamicPV:
        // No need to create volume
    default:
        framework.Failf("Invalid volType specified: %v", volType)
@@ -103,7 +101,7 @@ func GetSnapshotClass(
    snapshotClass := &unstructured.Unstructured{
        Object: map[string]interface{}{
            "kind":       "VolumeSnapshotClass",
            "apiVersion": snapshotAPIVersion,
            "apiVersion": utils.SnapshotAPIVersion,
            "metadata": map[string]interface{}{
                // Name must be unique, so let's base it on namespace name and use GenerateName
                // TODO(#96234): Remove unnecessary suffix.
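A short sketch of the CreateVolume dispatch from a suite's point of view (illustrative, not part of the diff). It assumes driver and config come from the driver's PrepareTest step, and that CreateVolume yields no volume for the dynamic and CSI-inline cases, which the hunk above only implies.

// Illustrative sketch: only InlineVolume and PreprovisionedPV reach the driver's
// CreateVolume; dynamic and CSI ephemeral inline patterns provision later instead.
func volumeFor(driver TestDriver, config *PerTestConfig, volType TestVolType) TestVolume {
    vol := CreateVolume(driver, config, volType)
    if vol == nil {
        framework.Logf("no pre-provisioned volume needed for volType %v", volType)
    }
    return vol
}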
test/e2e/storage/api/snapshotresource.go (new file, 372 lines)
@@ -0,0 +1,372 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
// SnapshotResource represents a snapshot class, a snapshot and its bound snapshot contents for a specific test case
|
||||
type SnapshotResource struct {
|
||||
Config *PerTestConfig
|
||||
Pattern TestPattern
|
||||
|
||||
Vs *unstructured.Unstructured
|
||||
Vscontent *unstructured.Unstructured
|
||||
Vsclass *unstructured.Unstructured
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot
|
||||
// from the VolumeSnapshotClass using a dynamic client.
|
||||
// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects.
|
||||
func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) (*unstructured.Unstructured, *unstructured.Unstructured) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
var err error
|
||||
if pattern.SnapshotType != DynamicCreatedSnapshot && pattern.SnapshotType != PreprovisionedCreatedSnapshot {
|
||||
err = fmt.Errorf("SnapshotType must be set to either DynamicCreatedSnapshot or PreprovisionedCreatedSnapshot")
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
dc := config.Framework.DynamicClient
|
||||
|
||||
ginkgo.By("creating a SnapshotClass")
|
||||
sclass := sDriver.GetSnapshotClass(config)
|
||||
if sclass == nil {
|
||||
framework.Failf("Failed to get snapshot class based on test config")
|
||||
}
|
||||
sclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String()
|
||||
|
||||
sclass, err = dc.Resource(utils.SnapshotClassGVR).Create(context.TODO(), sclass, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
sclass, err = dc.Resource(utils.SnapshotClassGVR).Get(context.TODO(), sclass.GetName(), metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a dynamic VolumeSnapshot")
|
||||
// prepare a dynamically provisioned volume snapshot with certain data
|
||||
snapshot := getSnapshot(pvcName, pvcNamespace, sclass.GetName())
|
||||
|
||||
snapshot, err = dc.Resource(utils.SnapshotGVR).Namespace(snapshot.GetNamespace()).Create(context.TODO(), snapshot, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
return sclass, snapshot
|
||||
}
|
||||
|
||||
// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object Bound to a
|
||||
// given VolumeSnapshot
|
||||
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
err := utils.WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
vs, err := dc.Resource(utils.SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
|
||||
|
||||
snapshotStatus := vs.Object["status"].(map[string]interface{})
|
||||
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
|
||||
framework.Logf("received snapshotStatus %v", snapshotStatus)
|
||||
framework.Logf("snapshotContentName %s", snapshotContentName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
vscontent, err := dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
return vscontent
|
||||
|
||||
}
|
||||
|
||||
// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
|
||||
// different test pattern snapshot provisioning and deletion policy
|
||||
func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource {
|
||||
var err error
|
||||
r := SnapshotResource{
|
||||
Config: config,
|
||||
Pattern: pattern,
|
||||
}
|
||||
r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts)
|
||||
|
||||
dc := r.Config.Framework.DynamicClient
|
||||
|
||||
r.Vscontent = GetSnapshotContentFromSnapshot(dc, r.Vs)
|
||||
|
||||
if pattern.SnapshotType == PreprovisionedCreatedSnapshot {
|
||||
// prepare a pre-provisioned VolumeSnapshotContent with certain data
|
||||
// Because this could be run with an external CSI driver, we have no way
|
||||
// to pre-provision the snapshot as we normally would using their API.
|
||||
// We instead dynamically take a snapshot (above step), delete the old snapshot,
|
||||
// and create another snapshot using the first snapshot's snapshot handle.
|
||||
|
||||
ginkgo.By("updating the snapshot content deletion policy to retain")
|
||||
r.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Retain"
|
||||
|
||||
r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), r.Vscontent, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("recording the volume handle and snapshotHandle")
|
||||
snapshotHandle := r.Vscontent.Object["status"].(map[string]interface{})["snapshotHandle"].(string)
|
||||
framework.Logf("Recording snapshot handle: %s", snapshotHandle)
|
||||
csiDriverName := r.Vsclass.Object["driver"].(string)
|
||||
|
||||
// If the deletion policy is retain on vscontent:
|
||||
// when vs is deleted vscontent will not be deleted
|
||||
// when the vscontent is manually deleted then the underlying snapshot resource will not be deleted.
|
||||
// We exploit this to create a snapshot resource from which we can create a preprovisioned snapshot
|
||||
ginkgo.By("deleting the snapshot and snapshot content")
|
||||
err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Delete(context.TODO(), r.Vs.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("checking the Snapshot has been deleted")
|
||||
err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dc.Resource(utils.SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("checking the Snapshot content has been deleted")
|
||||
err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a snapshot content with the snapshot handle")
|
||||
uuid := uuid.NewUUID()
|
||||
|
||||
snapName := getPreProvisionedSnapshotName(uuid)
|
||||
snapcontentName := getPreProvisionedSnapshotContentName(uuid)
|
||||
|
||||
r.Vscontent = getPreProvisionedSnapshotContent(snapcontentName, snapName, pvcNamespace, snapshotHandle, pattern.SnapshotDeletionPolicy.String(), csiDriverName)
|
||||
r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Create(context.TODO(), r.Vscontent, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a snapshot with that snapshot content")
|
||||
r.Vs = getPreProvisionedSnapshot(snapName, pvcNamespace, snapcontentName)
|
||||
r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = utils.WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("getting the snapshot and snapshot content")
|
||||
r.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(r.Vs.GetNamespace()).Get(context.TODO(), r.Vs.GetName(), metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
r.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), r.Vscontent.GetName(), metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
return &r
|
||||
}
|
||||
|
||||
// CleanupResource cleans up the snapshot resource and ignores not found errors
|
||||
func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error {
|
||||
var err error
|
||||
var cleanupErrs []error
|
||||
|
||||
dc := sr.Config.Framework.DynamicClient
|
||||
|
||||
if sr.Vs != nil {
|
||||
framework.Logf("deleting snapshot %q/%q", sr.Vs.GetNamespace(), sr.Vs.GetName())
|
||||
|
||||
sr.Vs, err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Get(context.TODO(), sr.Vs.GetName(), metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
snapshotStatus := sr.Vs.Object["status"].(map[string]interface{})
|
||||
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
|
||||
framework.Logf("received snapshotStatus %v", snapshotStatus)
|
||||
framework.Logf("snapshotContentName %s", snapshotContentName)
|
||||
|
||||
boundVsContent, err := dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
if boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" {
|
||||
// The purpose of this block is to prevent physical snapshotContent leaks.
|
||||
// We must update the SnapshotContent to have Delete Deletion policy,
|
||||
// or else the physical snapshot content will be leaked.
|
||||
boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete"
|
||||
boundVsContent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), boundVsContent, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case apierrors.IsNotFound(err):
|
||||
// the volume snapshot is not bound to snapshot content yet
|
||||
err = dc.Resource(utils.SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
default:
|
||||
cleanupErrs = append(cleanupErrs, err)
|
||||
}
|
||||
case apierrors.IsNotFound(err):
|
||||
// Hope that the underlying snapshot content and resource is gone already
|
||||
default:
|
||||
cleanupErrs = append(cleanupErrs, err)
|
||||
}
|
||||
}
|
||||
if sr.Vscontent != nil {
|
||||
framework.Logf("deleting snapshot content %q", sr.Vscontent.GetName())
|
||||
|
||||
sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Get(context.TODO(), sr.Vscontent.GetName(), metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
if sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" {
|
||||
// The purpose of this block is to prevent physical snapshotContent leaks.
|
||||
// We must update the SnapshotContent to have Delete Deletion policy,
|
||||
// or else the physical snapshot content will be leaked.
|
||||
sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete"
|
||||
sr.Vscontent, err = dc.Resource(utils.SnapshotContentGVR).Update(context.TODO(), sr.Vscontent, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = dc.Resource(utils.SnapshotContentGVR).Delete(context.TODO(), sr.Vscontent.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = utils.WaitForGVRDeletion(dc, utils.SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
case apierrors.IsNotFound(err):
|
||||
// Hope the underlying physical snapshot resource has been deleted already
|
||||
default:
|
||||
cleanupErrs = append(cleanupErrs, err)
|
||||
}
|
||||
}
|
||||
if sr.Vsclass != nil {
|
||||
framework.Logf("deleting snapshot class %q", sr.Vsclass.GetName())
|
||||
// typically this snapshot class has already been deleted
|
||||
err = dc.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), sr.Vsclass.GetName(), metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err)
|
||||
}
|
||||
err = utils.WaitForGVRDeletion(dc, utils.SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
return utilerrors.NewAggregate(cleanupErrs)
|
||||
}
|
||||
|
||||
// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
|
||||
func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
|
||||
var err error
|
||||
ginkgo.By("deleting the snapshot")
|
||||
err = dc.Resource(utils.SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
ginkgo.By("checking the Snapshot has been deleted")
|
||||
err = utils.WaitForNamespacedGVRDeletion(dc, utils.SnapshotGVR, ns, snapshotName, poll, timeout)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured {
|
||||
snapshot := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "VolumeSnapshot",
|
||||
"apiVersion": utils.SnapshotAPIVersion,
|
||||
"metadata": map[string]interface{}{
|
||||
"generateName": "snapshot-",
|
||||
"namespace": ns,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"volumeSnapshotClassName": snapshotClassName,
|
||||
"source": map[string]interface{}{
|
||||
"persistentVolumeClaimName": claimName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return snapshot
|
||||
}
|
||||
func getPreProvisionedSnapshot(snapName, ns, snapshotContentName string) *unstructured.Unstructured {
|
||||
snapshot := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "VolumeSnapshot",
|
||||
"apiVersion": utils.SnapshotAPIVersion,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": snapName,
|
||||
"namespace": ns,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"source": map[string]interface{}{
|
||||
"volumeSnapshotContentName": snapshotContentName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return snapshot
|
||||
}
|
||||
func getPreProvisionedSnapshotContent(snapcontentName, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured {
|
||||
snapshotContent := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "VolumeSnapshotContent",
|
||||
"apiVersion": utils.SnapshotAPIVersion,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": snapcontentName,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"source": map[string]interface{}{
|
||||
"snapshotHandle": snapshotHandle,
|
||||
},
|
||||
"volumeSnapshotRef": map[string]interface{}{
|
||||
"name": snapshotName,
|
||||
"namespace": snapshotNamespace,
|
||||
},
|
||||
"driver": csiDriverName,
|
||||
"deletionPolicy": deletionPolicy,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return snapshotContent
|
||||
}
|
||||
|
||||
func getPreProvisionedSnapshotContentName(uuid types.UID) string {
|
||||
return fmt.Sprintf("pre-provisioned-snapcontent-%s", string(uuid))
|
||||
}
|
||||
|
||||
func getPreProvisionedSnapshotName(uuid types.UID) string {
|
||||
return fmt.Sprintf("pre-provisioned-snapshot-%s", string(uuid))
|
||||
}
|
test/e2e/storage/api/testconfig.go (new file, 78 lines)
@@ -0,0 +1,78 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// PerTestConfig represents parameters that control test execution.
// One instance gets allocated for each test and is then passed
// via pointer to functions involved in the test.
type PerTestConfig struct {
    // The test driver for the test.
    Driver TestDriver

    // Some short word that gets inserted into dynamically
    // generated entities (pods, paths) as first part of the name
    // to make debugging easier. Can be the same for different
    // tests inside the test suite.
    Prefix string

    // The framework instance allocated for the current test.
    Framework *framework.Framework

    // If non-empty, Pods using a volume will be scheduled
    // according to the NodeSelection. Otherwise Kubernetes will
    // pick a node.
    ClientNodeSelection e2epod.NodeSelection

    // Some test drivers initialize a storage server. This is
    // the configuration that then has to be used to run tests.
    // The values above are ignored for such tests.
    ServerConfig *e2evolume.TestConfig

    // Some drivers run in their own namespace
    DriverNamespace *v1.Namespace
}

// GetUniqueDriverName returns unique driver name that can be used parallelly in tests
func (config *PerTestConfig) GetUniqueDriverName() string {
    return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName
}

// ConvertTestConfig returns a framework test config with the
// parameters specified for the testsuite or (if available) the
// dynamically created config for the volume server.
//
// This is done because TestConfig is the public API for
// the testsuites package whereas volume.TestConfig is merely
// an implementation detail. It contains fields that have no effect.
func ConvertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
    if in.ServerConfig != nil {
        return *in.ServerConfig
    }

    return e2evolume.TestConfig{
        Namespace:           in.Framework.Namespace.Name,
        Prefix:              in.Prefix,
        ClientNodeSelection: in.ClientNodeSelection,
    }
}
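ConvertTestConfig is the bridge from the new PerTestConfig to the framework-level volume config; a minimal usage sketch follows (illustrative, not part of the diff; config is assumed to come from driver.PrepareTest).

// Illustrative sketch: derive the framework volume config before running volume tests.
func logServerConfig(config *PerTestConfig) e2evolume.TestConfig {
    // Prefer the driver's own server config when present; otherwise fall back to the
    // namespace, prefix and node selection recorded in the per-test config.
    fwConfig := ConvertTestConfig(config)
    framework.Logf("volume tests for %q will run in namespace %q", config.GetUniqueDriverName(), fwConfig.Namespace)
    return fwConfig
}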
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites
package api

import (
    v1 "k8s.io/api/core/v1"
@@ -22,9 +22,7 @@ import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/kubernetes/test/e2e/framework"
    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
)

// TestDriver represents an interface for a driver to be tested in TestSuite.
@@ -44,7 +42,7 @@ type TestDriver interface {
    // expensive resources like framework.Framework. Tests that
    // depend on a connection to the cluster can be done in
    // PrepareTest once the framework is ready.
    SkipUnsupportedTest(testpatterns.TestPattern)
    SkipUnsupportedTest(TestPattern)

    // PrepareTest is called at test execution time each time a new test case is about to start.
    // It sets up all necessary resources and returns the per-test configuration
@@ -63,7 +61,7 @@ type TestVolume interface {
type PreprovisionedVolumeTestDriver interface {
    TestDriver
    // CreateVolume creates a pre-provisioned volume of the desired volume type.
    CreateVolume(config *PerTestConfig, volumeType testpatterns.TestVolType) TestVolume
    CreateVolume(config *PerTestConfig, volumeType TestVolType) TestVolume
}

// InlineVolumeTestDriver represents an interface for a TestDriver that supports InlineVolume
@@ -135,7 +133,8 @@ type CustomTimeoutsTestDriver interface {
    GetTimeouts() *framework.TimeoutContext
}

func getDriverTimeouts(driver TestDriver) *framework.TimeoutContext {
// GetDriverTimeouts returns the timeout of the driver operation
func GetDriverTimeouts(driver TestDriver) *framework.TimeoutContext {
    if d, ok := driver.(CustomTimeoutsTestDriver); ok {
        return d.GetTimeouts()
    }
@@ -225,38 +224,3 @@ type VolumeSnapshotStressTestOptions struct {
    // Number of snapshots to create for each volume.
    NumSnapshots int
}

// PerTestConfig represents parameters that control test execution.
// One instance gets allocated for each test and is then passed
// via pointer to functions involved in the test.
type PerTestConfig struct {
    // The test driver for the test.
    Driver TestDriver

    // Some short word that gets inserted into dynamically
    // generated entities (pods, paths) as first part of the name
    // to make debugging easier. Can be the same for different
    // tests inside the test suite.
    Prefix string

    // The framework instance allocated for the current test.
    Framework *framework.Framework

    // If non-empty, Pods using a volume will be scheduled
    // according to the NodeSelection. Otherwise Kubernetes will
    // pick a node.
    ClientNodeSelection e2epod.NodeSelection

    // Some test drivers initialize a storage server. This is
    // the configuration that then has to be used to run tests.
    // The values above are ignored for such tests.
    ServerConfig *e2evolume.TestConfig

    // Some drivers run in their own namespace
    DriverNamespace *v1.Namespace
}

// GetUniqueDriverName returns unique driver name that can be used parallelly in tests
func (config *PerTestConfig) GetUniqueDriverName() string {
    return config.Driver.GetDriverInfo().Name + "-" + config.Framework.UniqueName
}
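With getDriverTimeouts exported as GetDriverTimeouts, drivers can stretch timeouts by implementing CustomTimeoutsTestDriver; a minimal sketch follows (illustrative, not part of the diff; slowDriver and the doubled ClaimProvision value are hypothetical).

// Illustrative sketch: a driver that lengthens the claim-provisioning timeout.
type slowDriver struct {
    TestDriver // embeds an existing driver implementation (assumed)
}

func (d *slowDriver) GetTimeouts() *framework.TimeoutContext {
    t := GetDriverTimeouts(d.TestDriver) // start from the embedded driver's timeouts
    t.ClaimProvision = 2 * t.ClaimProvision // hypothetical: a slow backend needs longer
    return t
}

// Suites pick the stretched values up transparently:
//   timeouts := GetDriverTimeouts(&slowDriver{TestDriver: baseDriver})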
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package testpatterns
package api

import (
    v1 "k8s.io/api/core/v1"
test/e2e/storage/api/testsuite.go (new file, 129 lines)
@@ -0,0 +1,129 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package api

import (
    "fmt"

    "github.com/onsi/ginkgo"

    "k8s.io/kubernetes/test/e2e/framework"
    e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
)

// TestSuite represents an interface for a set of tests which works with TestDriver.
// Each testsuite should implement this interface.
// All the functions except GetTestSuiteInfo() should not be called directly. Instead,
// use RegisterTests() to register the tests in a more standard way.
type TestSuite interface {
    GetTestSuiteInfo() TestSuiteInfo
    // DefineTests defines tests of the testpattern for the driver.
    // Called inside a Ginkgo context that reflects the current driver and test pattern,
    // so the test suite can define tests directly with ginkgo.It.
    DefineTests(TestDriver, TestPattern)
    // SkipUnsupportedTests will skip the test suite based on the given TestPattern, TestDriver
    // Testsuite should check if the given pattern and driver works for the "whole testsuite"
    // Testcase specific check should happen inside defineTests
    SkipUnsupportedTests(TestDriver, TestPattern)
}

// RegisterTests register the driver + pattern combination to the inside TestSuite
// This function actually register tests inside testsuite
func RegisterTests(suite TestSuite, driver TestDriver, pattern TestPattern) {
    tsInfo := suite.GetTestSuiteInfo()
    testName := fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.Name, tsInfo.FeatureTag)
    ginkgo.Context(testName, func() {
        ginkgo.BeforeEach(func() {
            // skip all the invalid combination of driver and pattern
            SkipInvalidDriverPatternCombination(driver, pattern)
            // skip the unsupported test pattern and driver combination specific for this TestSuite
            suite.SkipUnsupportedTests(driver, pattern)
        })
        // actually define the tests
        // at this step the testsuite should not worry about if the pattern and driver
        // does not fit for the whole testsuite. But driver&pattern check
        // might still needed for specific independent test cases.
        suite.DefineTests(driver, pattern)
    })
}

// DefineTestSuites defines tests for all testpatterns and all testSuites for a driver
func DefineTestSuites(driver TestDriver, tsInits []func() TestSuite) {
    for _, testSuiteInit := range tsInits {
        suite := testSuiteInit()
        for _, pattern := range suite.GetTestSuiteInfo().TestPatterns {
            RegisterTests(suite, driver, pattern)
        }
    }
}

// TestSuiteInfo represents a set of parameters for TestSuite
type TestSuiteInfo struct {
    Name               string              // name of the TestSuite
    FeatureTag         string              // featureTag for the TestSuite
    TestPatterns       []TestPattern       // Slice of TestPattern for the TestSuite
    SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite
}

// SkipInvalidDriverPatternCombination will skip tests if the combination of driver, and testpattern
// is not compatible to be tested. This function will be called in the RegisterTests() to make
// sure all the testsuites we defined are valid.
//
// Whether it needs to be skipped is checked by following steps:
// 0. Check with driver SkipUnsupportedTest
// 1. Check if volType is supported by driver from its interface
// 2. Check if fsType is supported
//
// Test suites can also skip tests inside their own skipUnsupportedTests function or in
// individual tests.
func SkipInvalidDriverPatternCombination(driver TestDriver, pattern TestPattern) {
    dInfo := driver.GetDriverInfo()
    var isSupported bool

    // 0. Check with driver specific logic
    driver.SkipUnsupportedTest(pattern)

    // 1. Check if Whether volType is supported by driver from its interface
    switch pattern.VolType {
    case InlineVolume:
        _, isSupported = driver.(InlineVolumeTestDriver)
    case PreprovisionedPV:
        _, isSupported = driver.(PreprovisionedPVTestDriver)
    case DynamicPV, GenericEphemeralVolume:
        _, isSupported = driver.(DynamicPVTestDriver)
    case CSIInlineVolume:
        _, isSupported = driver.(EphemeralTestDriver)
    default:
        isSupported = false
    }

    if !isSupported {
        e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
    }

    // 2. Check if fsType is supported
    if !dInfo.SupportedFsType.Has(pattern.FsType) {
        e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
    }
    if pattern.FsType == "xfs" && framework.NodeOSDistroIs("windows") {
        e2eskipper.Skipf("Distro doesn't support xfs -- skipping")
    }
    if pattern.FsType == "ntfs" && !framework.NodeOSDistroIs("windows") {
        e2eskipper.Skipf("Distro %s doesn't support ntfs -- skipping", framework.TestContext.NodeOSDistro)
    }
}
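DefineTestSuites is what a driver's Ginkgo file calls to register every suite against itself; a minimal sketch follows (illustrative, not part of the diff; newMyDriver and the suite constructor list are hypothetical stand-ins for the drivers and suites defined elsewhere under test/e2e/storage).

// Illustrative sketch: wiring a driver to all suites it should run.
var exampleSuites = []func() TestSuite{
    // e.g. the suite constructors exported by the testsuites package.
}

var _ = ginkgo.Describe("[Driver: example]", func() {
    driver := newMyDriver() // hypothetical TestDriver implementation
    DefineTestSuites(driver, exampleSuites)
})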
test/e2e/storage/api/volumeresource.go (new file, 317 lines)
@@ -0,0 +1,317 @@
|
||||
/*
|
||||
Copyright 2020 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/pkg/errors"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storagev1 "k8s.io/api/storage/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
// VolumeResource is a generic implementation of TestResource that wil be able to
|
||||
// be used in most of TestSuites.
|
||||
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
|
||||
// Also, see subpath.go in the same directory for how to extend and use it.
|
||||
type VolumeResource struct {
|
||||
Config *PerTestConfig
|
||||
Pattern TestPattern
|
||||
VolSource *v1.VolumeSource
|
||||
Pvc *v1.PersistentVolumeClaim
|
||||
Pv *v1.PersistentVolume
|
||||
Sc *storagev1.StorageClass
|
||||
|
||||
Volume TestVolume
|
||||
}
|
||||
|
||||
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
|
||||
// different test pattern volume types.
|
||||
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
|
||||
r := VolumeResource{
|
||||
Config: config,
|
||||
Pattern: pattern,
|
||||
}
|
||||
dInfo := driver.GetDriverInfo()
|
||||
f := config.Framework
|
||||
cs := f.ClientSet
|
||||
|
||||
// Create volume for pre-provisioned volume tests
|
||||
r.Volume = CreateVolume(driver, config, pattern.VolType)
|
||||
|
||||
switch pattern.VolType {
|
||||
case InlineVolume:
|
||||
framework.Logf("Creating resource for inline volume")
|
||||
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
|
||||
r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume)
|
||||
}
|
||||
case PreprovisionedPV:
|
||||
framework.Logf("Creating resource for pre-provisioned PV")
|
||||
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
|
||||
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume)
|
||||
if pvSource != nil {
|
||||
r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
|
||||
r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */)
|
||||
}
|
||||
}
|
||||
case DynamicPV, GenericEphemeralVolume:
|
||||
framework.Logf("Creating resource for dynamic PV")
|
||||
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
|
||||
var err error
|
||||
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
|
||||
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ", claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, testVolumeSizeRange)
|
||||
r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)
|
||||
|
||||
if pattern.BindingMode != "" {
|
||||
r.Sc.VolumeBindingMode = &pattern.BindingMode
|
||||
}
|
||||
r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion
|
||||
|
||||
ginkgo.By("creating a StorageClass " + r.Sc.Name)
|
||||
|
||||
r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
switch pattern.VolType {
|
||||
case DynamicPV:
|
||||
r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC(
|
||||
f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes)
|
||||
r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */)
|
||||
case GenericEphemeralVolume:
|
||||
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
|
||||
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
|
||||
r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize, false /* readOnly */)
|
||||
}
|
||||
}
|
||||
case CSIInlineVolume:
|
||||
framework.Logf("Creating resource for CSI ephemeral inline volume")
|
||||
if eDriver, ok := driver.(EphemeralTestDriver); ok {
|
||||
attributes, _, _ := eDriver.GetVolume(config, 0)
|
||||
r.VolSource = &v1.VolumeSource{
|
||||
CSI: &v1.CSIVolumeSource{
|
||||
Driver: eDriver.GetCSIDriverName(config),
|
||||
VolumeAttributes: attributes,
|
||||
},
|
||||
}
|
||||
}
|
||||
default:
|
||||
framework.Failf("VolumeResource doesn't support: %s", pattern.VolType)
|
||||
}
|
||||
|
||||
if r.VolSource == nil {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
|
||||
}
|
||||
|
||||
return &r
|
||||
}
|
||||
|
||||
func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string, readOnly bool) *v1.VolumeSource {
|
||||
if len(accessModes) == 0 {
|
||||
accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
}
|
||||
return &v1.VolumeSource{
|
||||
Ephemeral: &v1.EphemeralVolumeSource{
|
||||
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: &scName,
|
||||
AccessModes: accessModes,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse(claimSize),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CleanupResource cleans up VolumeResource
|
||||
func (r *VolumeResource) CleanupResource() error {
|
||||
f := r.Config.Framework
|
||||
var cleanUpErrs []error
|
||||
if r.Pvc != nil || r.Pv != nil {
|
||||
switch r.Pattern.VolType {
|
||||
case PreprovisionedPV:
|
||||
ginkgo.By("Deleting pv and pvc")
|
||||
if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 {
|
||||
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
|
||||
}
|
||||
case DynamicPV:
|
||||
ginkgo.By("Deleting pvc")
|
||||
// We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner
|
||||
if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
|
||||
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
|
||||
r.Pv.Name, v1.PersistentVolumeReclaimDelete)
|
||||
}
|
||||
if r.Pvc != nil {
|
||||
cs := f.ClientSet
|
||||
pv := r.Pv
|
||||
if pv == nil && r.Pvc.Name != "" {
|
||||
// This happens for late binding. Check whether we have a volume now that we need to wait for.
|
||||
pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
if pvc.Spec.VolumeName != "" {
|
||||
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PV %v", pvc.Spec.VolumeName))
|
||||
}
|
||||
}
|
||||
case apierrors.IsNotFound(err):
|
||||
// Without the PVC, we cannot locate the corresponding PV. Let's
|
||||
// hope that it is gone.
|
||||
default:
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PVC %v", r.Pvc.Name))
|
||||
}
|
||||
}
|
||||
|
||||
err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name)
|
||||
if err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name))
|
||||
}
|
||||
|
||||
if pv != nil {
|
||||
err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, 5*time.Minute)
|
||||
if err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err,
|
||||
"Persistent Volume %v not deleted by dynamic provisioner", pv.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv)
|
||||
}
|
||||
}
|
||||
|
||||
if r.Sc != nil {
|
||||
ginkgo.By("Deleting sc")
|
||||
if err := storageutils.DeleteStorageClass(f.ClientSet, r.Sc.Name); err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name))
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup volume for pre-provisioned volume tests
|
||||
if r.Volume != nil {
|
||||
if err := storageutils.TryFunc(r.Volume.DeleteVolume); err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
|
||||
}
|
||||
}
|
||||
return utilerrors.NewAggregate(cleanUpErrs)
|
||||
}
|
||||
|
||||
func createPVCPV(
|
||||
f *framework.Framework,
|
||||
name string,
|
||||
pvSource *v1.PersistentVolumeSource,
|
||||
volumeNodeAffinity *v1.VolumeNodeAffinity,
|
||||
volMode v1.PersistentVolumeMode,
|
||||
accessModes []v1.PersistentVolumeAccessMode,
|
||||
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
|
||||
pvConfig := e2epv.PersistentVolumeConfig{
|
||||
NamePrefix: fmt.Sprintf("%s-", name),
|
||||
StorageClassName: f.Namespace.Name,
|
||||
PVSource: *pvSource,
|
||||
NodeAffinity: volumeNodeAffinity,
|
||||
AccessModes: accessModes,
|
||||
}
|
||||
|
||||
pvcConfig := e2epv.PersistentVolumeClaimConfig{
|
||||
StorageClassName: &f.Namespace.Name,
|
||||
AccessModes: accessModes,
|
||||
}
|
||||
|
||||
if volMode != "" {
|
||||
pvConfig.VolumeMode = &volMode
|
||||
pvcConfig.VolumeMode = &volMode
|
||||
}
|
||||
|
||||
framework.Logf("Creating PVC and PV")
|
||||
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
|
||||
framework.ExpectNoError(err, "PVC, PV creation failed")
|
||||
|
||||
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc)
|
||||
framework.ExpectNoError(err, "PVC, PV failed to bind")
|
||||
|
||||
return pv, pvc
|
||||
}
|
||||
|
||||
func createPVCPVFromDynamicProvisionSC(
|
||||
f *framework.Framework,
|
||||
name string,
|
||||
claimSize string,
|
||||
sc *storagev1.StorageClass,
|
||||
volMode v1.PersistentVolumeMode,
|
||||
accessModes []v1.PersistentVolumeAccessMode,
|
||||
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
|
||||
cs := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
ginkgo.By("creating a claim")
|
||||
pvcCfg := e2epv.PersistentVolumeClaimConfig{
|
||||
NamePrefix: name,
|
||||
ClaimSize: claimSize,
|
||||
StorageClassName: &(sc.Name),
|
||||
AccessModes: accessModes,
|
||||
VolumeMode: &volMode,
|
||||
}
|
||||
|
||||
pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns)
|
||||
|
||||
var err error
|
||||
pvc, err = e2epv.CreatePVC(cs, ns, pvc)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
if !isDelayedBinding(sc) {
|
||||
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
var pv *v1.PersistentVolume
|
||||
if !isDelayedBinding(sc) {
|
||||
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
return pv, pvc
|
||||
}
|
||||
|
||||
func isDelayedBinding(sc *storagev1.StorageClass) bool {
|
||||
if sc.VolumeBindingMode != nil {
|
||||
return *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
|
||||
}
|
||||
return false
|
||||
}
@ -51,8 +51,8 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -121,13 +121,13 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

type mockDriverSetup struct {
cs clientset.Interface
config *testsuites.PerTestConfig
config *storageapi.PerTestConfig
testCleanups []func()
pods []*v1.Pod
pvcs []*v1.PersistentVolumeClaim
sc map[string]*storagev1.StorageClass
vsc map[string]*unstructured.Unstructured
driver testsuites.TestDriver
driver storageapi.TestDriver
provisioner string
tp testParameters
}
@ -189,7 +189,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
createPod := func(ephemeral bool) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
ginkgo.By("Creating pod")
var sc *storagev1.StorageClass
if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
if dDriver, ok := m.driver.(storageapi.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
}
scTest := testsuites.StorageClassTest{
@ -238,7 +238,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
ginkgo.By("Creating pod with fsGroup")
nodeSelection := m.config.ClientNodeSelection
var sc *storagev1.StorageClass
if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
if dDriver, ok := m.driver.(storageapi.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
}
scTest := testsuites.StorageClassTest{
@ -296,7 +296,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

for _, vsc := range m.vsc {
ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
m.config.Framework.DynamicClient.Resource(testsuites.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
m.config.Framework.DynamicClient.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
}
ginkgo.By("Cleaning up resources")
for _, cleanupFunc := range m.testCleanups {
@ -1247,7 +1247,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
enableSnapshot: true,
javascriptHooks: scripts,
})
sDriver, ok := m.driver.(testsuites.SnapshottableTestDriver)
sDriver, ok := m.driver.(storageapi.SnapshottableTestDriver)
if !ok {
e2eskipper.Skipf("mock driver %s does not support snapshots -- skipping", m.driver.GetDriverInfo().Name)

@ -1257,7 +1257,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
defer cleanup()

var sc *storagev1.StorageClass
if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
if dDriver, ok := m.driver.(storageapi.DynamicPVTestDriver); ok {
sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
}
ginkgo.By("Creating storage class")
@ -1272,7 +1272,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

ginkgo.By("Creating snapshot")
// TODO: Test VolumeSnapshots with Retain policy
snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
snapshotClass, snapshot := storageapi.CreateSnapshot(sDriver, m.config, storageapi.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
framework.ExpectNoError(err, "failed to create snapshot")
m.vsc[snapshotClass.GetName()] = snapshotClass
volumeSnapshotName := snapshot.GetName()
@ -1306,19 +1306,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}

ginkgo.By(fmt.Sprintf("Get VolumeSnapshotContent bound to VolumeSnapshot %s", snapshot.GetName()))
snapshotContent := testsuites.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot)
snapshotContent := storageapi.GetSnapshotContentFromSnapshot(m.config.Framework.DynamicClient, snapshot)
volumeSnapshotContentName := snapshotContent.GetName()

ginkgo.By(fmt.Sprintf("Verify VolumeSnapshotContent %s contains finalizer %s", snapshot.GetName(), volumeSnapshotContentFinalizer))
err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, testsuites.SnapshotContentGVR, volumeSnapshotContentName, "", volumeSnapshotContentFinalizer, 1*time.Millisecond, 1*time.Minute)
err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, utils.SnapshotContentGVR, volumeSnapshotContentName, "", volumeSnapshotContentFinalizer, 1*time.Millisecond, 1*time.Minute)
framework.ExpectNoError(err)

ginkgo.By(fmt.Sprintf("Delete VolumeSnapshotContent %s", snapshotContent.GetName()))
err = m.config.Framework.DynamicClient.Resource(testsuites.SnapshotContentGVR).Delete(ctx, snapshotContent.GetName(), metav1.DeleteOptions{})
err = m.config.Framework.DynamicClient.Resource(utils.SnapshotContentGVR).Delete(ctx, snapshotContent.GetName(), metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete snapshotcontent: %v", err)

ginkgo.By("Get VolumeSnapshotContent from API server and verify deletion timestamp is set")
snapshotContent, err = m.config.Framework.DynamicClient.Resource(testsuites.SnapshotContentGVR).Get(context.TODO(), snapshotContent.GetName(), metav1.GetOptions{})
snapshotContent, err = m.config.Framework.DynamicClient.Resource(utils.SnapshotContentGVR).Get(context.TODO(), snapshotContent.GetName(), metav1.GetOptions{})
framework.ExpectNoError(err)

if snapshotContent.GetDeletionTimestamp() == nil {
@ -1332,15 +1332,15 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}

ginkgo.By(fmt.Sprintf("Verify VolumeSnapshot %s contains finalizer %s", snapshot.GetName(), volumeSnapshotBoundFinalizer))
err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, testsuites.SnapshotGVR, volumeSnapshotName, f.Namespace.Name, volumeSnapshotBoundFinalizer, 1*time.Millisecond, 1*time.Minute)
err = utils.WaitForGVRFinalizer(ctx, m.config.Framework.DynamicClient, utils.SnapshotGVR, volumeSnapshotName, f.Namespace.Name, volumeSnapshotBoundFinalizer, 1*time.Millisecond, 1*time.Minute)
framework.ExpectNoError(err)

ginkgo.By("Delete VolumeSnapshot")
err = testsuites.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout)
err = storageapi.DeleteAndWaitSnapshot(m.config.Framework.DynamicClient, f.Namespace.Name, volumeSnapshotName, framework.Poll, framework.SnapshotDeleteTimeout)
framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshot %s", volumeSnapshotName))

ginkgo.By(fmt.Sprintf("Wait for VolumeSnapshotContent %s to be deleted", volumeSnapshotContentName))
err = utils.WaitForGVRDeletion(m.config.Framework.DynamicClient, testsuites.SnapshotContentGVR, volumeSnapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(m.config.Framework.DynamicClient, utils.SnapshotContentGVR, volumeSnapshotContentName, framework.Poll, framework.SnapshotDeleteTimeout)
framework.ExpectNoError(err, fmt.Sprintf("failed to delete VolumeSnapshotContent %s", volumeSnapshotContentName))
})
}
@ -1462,19 +1462,19 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {

// Create the subdirectory to ensure that fsGroup propagates
createDirectory := fmt.Sprintf("mkdir %s", dirName)
_, _, err = utils.PodExec(f, pod, createDirectory)
_, _, err = e2evolume.PodExec(f, pod, createDirectory)
framework.ExpectNoError(err, "failed: creating the directory: %s", err)

// Inject the contents onto the mount
createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
_, _, err = utils.PodExec(f, pod, createFile)
_, _, err = e2evolume.PodExec(f, pod, createFile)
framework.ExpectNoError(err, "failed: writing the contents: %s", err)

// Delete the created file. This step is mandatory, as the mock driver
// won't clean up the contents automatically.
defer func() {
delete := fmt.Sprintf("rm -fr %s", dirName)
_, _, err = utils.PodExec(f, pod, delete)
_, _, err = e2evolume.PodExec(f, pod, delete)
framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
}()

@ -17,6 +17,7 @@ limitations under the License.
package storage

import (
"k8s.io/kubernetes/test/e2e/storage/api"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
@ -25,7 +26,7 @@ import (
)

// List of testDrivers to be executed in below loop
var csiTestDrivers = []func() testsuites.TestDriver{
var csiTestDrivers = []func() api.TestDriver{
drivers.InitHostPathCSIDriver,
drivers.InitGcePDCSIDriver,
// Don't run tests with mock driver (drivers.InitMockCSIDriver), it does not provide persistent storage.
@ -36,8 +37,8 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
for _, initDriver := range csiTestDrivers {
curDriver := initDriver()

ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
testsuites.DefineTestSuites(curDriver, testsuites.CSISuites)
ginkgo.Context(api.GetDriverNameWithFeatureTags(curDriver), func() {
api.DefineTestSuites(curDriver, testsuites.CSISuites)
})
}
})
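As a usage note, a hedged sketch of consuming the relocated api package to run a single suite against a single driver; it assumes the same imports as the file above, and the Describe text is illustrative, not part of this commit:

// Illustrative only: run just the volumes suite against the hostpath CSI driver
// through the extracted api package.
var _ = utils.SIGDescribe("CSI hostpath smoke", func() {
	curDriver := drivers.InitHostPathCSIDriver()
	ginkgo.Context(api.GetDriverNameWithFeatureTags(curDriver), func() {
		api.DefineTestSuites(curDriver, []func() api.TestSuite{
			testsuites.InitVolumesTestSuite,
		})
	})
})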
@ -29,8 +29,7 @@ go_library(
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/api:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/storage/vsphere:go_default_library",
"//test/utils/image:go_default_library",
@ -57,8 +57,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
"k8s.io/kubernetes/test/e2e/storage/utils"
)

@ -71,18 +70,18 @@ const (

// hostpathCSI
type hostpathCSIDriver struct {
driverInfo testsuites.DriverInfo
driverInfo storageapi.DriverInfo
manifests []string
cleanupHandle framework.CleanupActionHandle
volumeAttributes []map[string]string
}

func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, volumeAttributes []map[string]string, manifests ...string) testsuites.TestDriver {
func initHostPathCSIDriver(name string, capabilities map[storageapi.Capability]bool, volumeAttributes []map[string]string, manifests ...string) storageapi.TestDriver {
return &hostpathCSIDriver{
driverInfo: testsuites.DriverInfo{
driverInfo: storageapi.DriverInfo{
Name: name,
FeatureTag: "",
MaxFileSize: testpatterns.FileSizeMedium,
MaxFileSize: storageapi.FileSizeMedium,
SupportedFsType: sets.NewString(
"", // Default fsType
),
@ -90,11 +89,11 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
Min: "1Mi",
},
Capabilities: capabilities,
StressTestOptions: &testsuites.StressTestOptions{
StressTestOptions: &storageapi.StressTestOptions{
NumPods: 10,
NumRestarts: 10,
},
VolumeSnapshotStressTestOptions: &testsuites.VolumeSnapshotStressTestOptions{
VolumeSnapshotStressTestOptions: &storageapi.VolumeSnapshotStressTestOptions{
NumPods: 10,
NumSnapshots: 10,
},
@ -104,22 +103,22 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
}
}

var _ testsuites.TestDriver = &hostpathCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
var _ testsuites.EphemeralTestDriver = &hostpathCSIDriver{}
var _ storageapi.TestDriver = &hostpathCSIDriver{}
var _ storageapi.DynamicPVTestDriver = &hostpathCSIDriver{}
var _ storageapi.SnapshottableTestDriver = &hostpathCSIDriver{}
var _ storageapi.EphemeralTestDriver = &hostpathCSIDriver{}

// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver() testsuites.TestDriver {
capabilities := map[testsuites.Capability]bool{
testsuites.CapPersistence: true,
testsuites.CapSnapshotDataSource: true,
testsuites.CapMultiPODs: true,
testsuites.CapBlock: true,
testsuites.CapPVCDataSource: true,
testsuites.CapControllerExpansion: true,
testsuites.CapSingleNodeVolume: true,
testsuites.CapVolumeLimits: true,
func InitHostPathCSIDriver() storageapi.TestDriver {
capabilities := map[storageapi.Capability]bool{
storageapi.CapPersistence: true,
storageapi.CapSnapshotDataSource: true,
storageapi.CapMultiPODs: true,
storageapi.CapBlock: true,
storageapi.CapPVCDataSource: true,
storageapi.CapControllerExpansion: true,
storageapi.CapSingleNodeVolume: true,
storageapi.CapVolumeLimits: true,
}
return initHostPathCSIDriver("csi-hostpath",
capabilities,
@ -141,56 +140,56 @@ func InitHostPathCSIDriver() testsuites.TestDriver {
)
}

func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (h *hostpathCSIDriver) GetDriverInfo() *storageapi.DriverInfo {
return &h.driverInfo
}

func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
if pattern.VolType == testpatterns.CSIInlineVolume && len(h.volumeAttributes) == 0 {
func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern storageapi.TestPattern) {
if pattern.VolType == storageapi.CSIInlineVolume && len(h.volumeAttributes) == 0 {
e2eskipper.Skipf("%s has no volume attributes defined, doesn't support ephemeral inline volumes", h.driverInfo.Name)
}
}

func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)

return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
return storageapi.GetStorageClass(provisioner, parameters, nil, ns, suffix)
}

func (h *hostpathCSIDriver) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
func (h *hostpathCSIDriver) GetVolume(config *storageapi.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
return h.volumeAttributes[volumeNumber%len(h.volumeAttributes)], false /* not shared */, false /* read-write */
}

func (h *hostpathCSIDriver) GetCSIDriverName(config *testsuites.PerTestConfig) string {
func (h *hostpathCSIDriver) GetCSIDriverName(config *storageapi.PerTestConfig) string {
return config.GetUniqueDriverName()
}

func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
func (h *hostpathCSIDriver) GetSnapshotClass(config *storageapi.PerTestConfig) *unstructured.Unstructured {
snapshotter := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-vsc", snapshotter)

return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
}

func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
// Create secondary namespace which will be used for creating driver
driverNamespace := utils.CreateDriverNamespace(f)
ns2 := driverNamespace.Name
ns1 := f.Namespace.Name

ginkgo.By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
cancelLogging := utils.StartPodLogs(f, driverNamespace)
cs := f.ClientSet

// The hostpath CSI driver only works when everything runs on the same node.
node, err := e2enode.GetRandomReadySchedulableNode(cs)
framework.ExpectNoError(err)
config := &testsuites.PerTestConfig{
config := &storageapi.PerTestConfig{
Driver: h,
Prefix: "hostpath",
Framework: f,
@ -243,7 +242,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
// mockCSI
type mockCSIDriver struct {
driverInfo testsuites.DriverInfo
driverInfo storageapi.DriverInfo
manifests []string
podInfo *bool
storageCapacity *bool
@ -275,12 +274,12 @@ type CSIMockDriverOpts struct {
FSGroupPolicy *storagev1.FSGroupPolicy
}

var _ testsuites.TestDriver = &mockCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
var _ testsuites.SnapshottableTestDriver = &mockCSIDriver{}
var _ storageapi.TestDriver = &mockCSIDriver{}
var _ storageapi.DynamicPVTestDriver = &mockCSIDriver{}
var _ storageapi.SnapshottableTestDriver = &mockCSIDriver{}

// InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
func InitMockCSIDriver(driverOpts CSIMockDriverOpts) storageapi.TestDriver {
driverManifests := []string{
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@ -308,18 +307,18 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
}

return &mockCSIDriver{
driverInfo: testsuites.DriverInfo{
driverInfo: storageapi.DriverInfo{
Name: "csi-mock",
FeatureTag: "",
MaxFileSize: testpatterns.FileSizeMedium,
MaxFileSize: storageapi.FileSizeMedium,
SupportedFsType: sets.NewString(
"", // Default fsType
),
Capabilities: map[testsuites.Capability]bool{
testsuites.CapPersistence: false,
testsuites.CapFsGroup: false,
testsuites.CapExec: false,
testsuites.CapVolumeLimits: true,
Capabilities: map[storageapi.Capability]bool{
storageapi.CapPersistence: false,
storageapi.CapFsGroup: false,
storageapi.CapExec: false,
storageapi.CapVolumeLimits: true,
},
},
manifests: driverManifests,
@ -336,45 +335,45 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
}
}

func (m *mockCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (m *mockCSIDriver) GetDriverInfo() *storageapi.DriverInfo {
return &m.driverInfo
}

func (m *mockCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
func (m *mockCSIDriver) SkipUnsupportedTest(pattern storageapi.TestPattern) {
}

func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := config.GetUniqueDriverName()
parameters := map[string]string{}
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-sc", provisioner)

return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
return storageapi.GetStorageClass(provisioner, parameters, nil, ns, suffix)
}

func (m *mockCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
func (m *mockCSIDriver) GetSnapshotClass(config *storageapi.PerTestConfig) *unstructured.Unstructured {
parameters := map[string]string{}
snapshotter := m.driverInfo.Name + "-" + config.Framework.UniqueName
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-vsc", snapshotter)

return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
}

func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
// Create secondary namespace which will be used for creating driver
driverNamespace := utils.CreateDriverNamespace(f)
ns2 := driverNamespace.Name
ns1 := f.Namespace.Name

ginkgo.By("deploying csi mock driver")
cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
cancelLogging := utils.StartPodLogs(f, driverNamespace)
cs := f.ClientSet

// pods should be scheduled on the node
node, err := e2enode.GetRandomReadySchedulableNode(cs)
framework.ExpectNoError(err)
config := &testsuites.PerTestConfig{
config := &storageapi.PerTestConfig{
Driver: m,
Prefix: "mock",
Framework: f,
@ -481,21 +480,21 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest

// gce-pd
type gcePDCSIDriver struct {
driverInfo testsuites.DriverInfo
driverInfo storageapi.DriverInfo
cleanupHandle framework.CleanupActionHandle
}

var _ testsuites.TestDriver = &gcePDCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &gcePDCSIDriver{}
var _ testsuites.SnapshottableTestDriver = &gcePDCSIDriver{}
var _ storageapi.TestDriver = &gcePDCSIDriver{}
var _ storageapi.DynamicPVTestDriver = &gcePDCSIDriver{}
var _ storageapi.SnapshottableTestDriver = &gcePDCSIDriver{}

// InitGcePDCSIDriver returns gcePDCSIDriver that implements TestDriver interface
func InitGcePDCSIDriver() testsuites.TestDriver {
func InitGcePDCSIDriver() storageapi.TestDriver {
return &gcePDCSIDriver{
driverInfo: testsuites.DriverInfo{
driverInfo: storageapi.DriverInfo{
Name: GCEPDCSIDriverName,
FeatureTag: "[Serial]",
MaxFileSize: testpatterns.FileSizeMedium,
MaxFileSize: storageapi.FileSizeMedium,
SupportedSizeRange: e2evolume.SizeRange{
Min: "5Gi",
},
@ -507,27 +506,27 @@ func InitGcePDCSIDriver() testsuites.TestDriver {
"xfs",
),
SupportedMountOption: sets.NewString("debug", "nouid32"),
Capabilities: map[testsuites.Capability]bool{
testsuites.CapPersistence: true,
testsuites.CapBlock: true,
testsuites.CapFsGroup: true,
testsuites.CapExec: true,
testsuites.CapMultiPODs: true,
Capabilities: map[storageapi.Capability]bool{
storageapi.CapPersistence: true,
storageapi.CapBlock: true,
storageapi.CapFsGroup: true,
storageapi.CapExec: true,
storageapi.CapMultiPODs: true,
// GCE supports volume limits, but the test creates large
// number of volumes and times out test suites.
testsuites.CapVolumeLimits: false,
testsuites.CapTopology: true,
testsuites.CapControllerExpansion: true,
testsuites.CapNodeExpansion: true,
testsuites.CapSnapshotDataSource: true,
storageapi.CapVolumeLimits: false,
storageapi.CapTopology: true,
storageapi.CapControllerExpansion: true,
storageapi.CapNodeExpansion: true,
storageapi.CapSnapshotDataSource: true,
},
RequiredAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
TopologyKeys: []string{GCEPDCSIZoneTopologyKey},
StressTestOptions: &testsuites.StressTestOptions{
StressTestOptions: &storageapi.StressTestOptions{
NumPods: 10,
NumRestarts: 10,
},
VolumeSnapshotStressTestOptions: &testsuites.VolumeSnapshotStressTestOptions{
VolumeSnapshotStressTestOptions: &storageapi.VolumeSnapshotStressTestOptions{
// GCE only allows for one snapshot per volume to be created at a time,
// which can cause test timeouts. We reduce the likelihood of test timeouts
// by increasing the number of pods (and volumes) and reducing the number
@ -539,11 +538,11 @@ func InitGcePDCSIDriver() testsuites.TestDriver {
}
}

func (g *gcePDCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
func (g *gcePDCSIDriver) GetDriverInfo() *storageapi.DriverInfo {
return &g.driverInfo
}

func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageapi.TestPattern) {
e2eskipper.SkipUnlessProviderIs("gce", "gke")
if pattern.FsType == "xfs" {
e2eskipper.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
@ -553,7 +552,7 @@ func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
}
}

func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
ns := config.Framework.Namespace.Name
provisioner := g.driverInfo.Name
suffix := fmt.Sprintf("%s-sc", g.driverInfo.Name)
@ -564,26 +563,26 @@ func (g *gcePDCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerT
}
delayedBinding := storagev1.VolumeBindingWaitForFirstConsumer

return testsuites.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
return storageapi.GetStorageClass(provisioner, parameters, &delayedBinding, ns, suffix)
}

func (g *gcePDCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
func (g *gcePDCSIDriver) GetSnapshotClass(config *storageapi.PerTestConfig) *unstructured.Unstructured {
parameters := map[string]string{}
snapshotter := g.driverInfo.Name
ns := config.Framework.Namespace.Name
suffix := fmt.Sprintf("%s-vsc", snapshotter)

return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
}

func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
ginkgo.By("deploying csi gce-pd driver")
// Create secondary namespace which will be used for creating driver
driverNamespace := utils.CreateDriverNamespace(f)
ns2 := driverNamespace.Name
ns1 := f.Namespace.Name

cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
cancelLogging := utils.StartPodLogs(f, driverNamespace)
// It would be safer to rename the gcePD driver, but that
// hasn't been done before either and attempts to do so now led to
// errors during driver registration, therefore it is disabled
@ -592,7 +591,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
// These are the options which would have to be used:
// o := utils.PatchCSIOptions{
// OldDriverName: g.driverInfo.Name,
// NewDriverName: testsuites.GetUniqueDriverName(g),
// NewDriverName: storageapi.GetUniqueDriverName(g),
// DriverContainerName: "gce-driver",
// ProvisionerContainerName: "csi-external-provisioner",
// }
@ -638,7 +637,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
}
g.cleanupHandle = framework.AddCleanupAction(cleanupFunc)

return &testsuites.PerTestConfig{
return &storageapi.PerTestConfig{
Driver: g,
Prefix: "gcepd",
Framework: f,
File diff suppressed because it is too large
test/e2e/storage/external/BUILD (vendored)
@ -19,7 +19,7 @@ go_library(
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/api:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
@ -36,7 +36,7 @@ go_test(
deps = [
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testsuites:go_default_library",
"//test/e2e/storage/api:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)
test/e2e/storage/external/external.go (vendored)
@ -39,7 +39,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"

@ -55,7 +55,7 @@ type driverDefinition struct {
// for details. The only field with a non-zero default is the list of
// supported file systems (SupportedFsType): it is set so that tests using
// the default file system are enabled.
DriverInfo testsuites.DriverInfo
DriverInfo storageapi.DriverInfo

// StorageClass must be set to enable dynamic provisioning tests.
// The default is to not run those tests.
@ -171,9 +171,9 @@ func AddDriverDefinition(filename string) error {
return errors.Errorf("%q: DriverInfo.Name not set", filename)
}

description := "External Storage " + testsuites.GetDriverNameWithFeatureTags(driver)
description := "External Storage " + storageapi.GetDriverNameWithFeatureTags(driver)
ginkgo.Describe(description, func() {
testsuites.DefineTestSuites(driver, testsuites.CSISuites)
storageapi.DefineTestSuites(driver, testsuites.CSISuites)
})

return nil
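For context, a hedged sketch of how a test binary might feed a definition file to AddDriverDefinition; the file path and error handling below are illustrative assumptions, not part of this commit:

// Illustrative only: register one external driver definition before the Ginkgo
// suite runs. AddDriverDefinition parses the definition file and generates an
// "External Storage ..." Describe block as shown above.
func init() {
	if err := external.AddDriverDefinition("/tmp/csi-driver-def.yaml"); err != nil {
		panic(err)
	}
}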
@ -189,7 +189,7 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) {
}
// Some reasonable defaults follow.
driver := &driverDefinition{
DriverInfo: testsuites.DriverInfo{
DriverInfo: storageapi.DriverInfo{
SupportedFsType: sets.NewString(
"", // Default fsType
),
@ -206,20 +206,20 @@ func loadDriverDefinition(filename string) (*driverDefinition, error) {
return driver, nil
}

var _ testsuites.TestDriver = &driverDefinition{}
var _ storageapi.TestDriver = &driverDefinition{}

// We have to implement the interface because dynamic PV may or may
// not be supported. driverDefinition.SkipUnsupportedTest checks that
// based on the actual driver definition.
var _ testsuites.DynamicPVTestDriver = &driverDefinition{}
var _ storageapi.DynamicPVTestDriver = &driverDefinition{}

// Same for snapshotting.
var _ testsuites.SnapshottableTestDriver = &driverDefinition{}
var _ storageapi.SnapshottableTestDriver = &driverDefinition{}

// And for ephemeral volumes.
var _ testsuites.EphemeralTestDriver = &driverDefinition{}
var _ storageapi.EphemeralTestDriver = &driverDefinition{}

var _ testsuites.CustomTimeoutsTestDriver = &driverDefinition{}
var _ storageapi.CustomTimeoutsTestDriver = &driverDefinition{}

// runtime.DecodeInto needs a runtime.Object but doesn't do any
// deserialization of it and therefore none of the methods below need
@ -234,21 +234,21 @@ func (d *driverDefinition) GetObjectKind() schema.ObjectKind {
return nil
}

func (d *driverDefinition) GetDriverInfo() *testsuites.DriverInfo {
func (d *driverDefinition) GetDriverInfo() *storageapi.DriverInfo {
return &d.DriverInfo
}

func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
func (d *driverDefinition) SkipUnsupportedTest(pattern storageapi.TestPattern) {
supported := false
// TODO (?): add support for more volume types
switch pattern.VolType {
case "":
supported = true
case testpatterns.DynamicPV:
case storageapi.DynamicPV:
if d.StorageClass.FromName || d.StorageClass.FromFile != "" || d.StorageClass.FromExistingClassName != "" {
supported = true
}
case testpatterns.CSIInlineVolume:
case storageapi.CSIInlineVolume:
supported = len(d.InlineVolumes) != 0
}
if !supported {
@ -257,7 +257,7 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern)

}

func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *storageapi.PerTestConfig, fsType string) *storagev1.StorageClass {
var (
sc *storagev1.StorageClass
err error
@ -295,7 +295,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites
// reconsidered if we eventually need to move in-tree storage tests out.
sc.Parameters["csi.storage.k8s.io/fstype"] = fsType
}
return testsuites.CopyStorageClass(sc, f.Namespace.Name, "e2e-sc")
return storageapi.CopyStorageClass(sc, f.Namespace.Name, "e2e-sc")
}

func (d *driverDefinition) GetTimeouts() *framework.TimeoutContext {
@ -348,7 +348,7 @@ func loadSnapshotClass(filename string) (*unstructured.Unstructured, error) {
return snapshotClass, nil
}

func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig) *unstructured.Unstructured {
func (d *driverDefinition) GetSnapshotClass(e2econfig *storageapi.PerTestConfig) *unstructured.Unstructured {
if !d.SnapshotClass.FromName && d.SnapshotClass.FromFile == "" && d.SnapshotClass.FromExistingClassName == "" {
e2eskipper.Skipf("Driver %q does not support snapshotting - skipping", d.DriverInfo.Name)
}
@ -363,7 +363,7 @@ func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig)
case d.SnapshotClass.FromName:
// Do nothing (just use empty parameters)
case d.SnapshotClass.FromExistingClassName != "":
snapshotClass, err := f.DynamicClient.Resource(testsuites.SnapshotClassGVR).Get(context.TODO(), d.SnapshotClass.FromExistingClassName, metav1.GetOptions{})
snapshotClass, err := f.DynamicClient.Resource(utils.SnapshotClassGVR).Get(context.TODO(), d.SnapshotClass.FromExistingClassName, metav1.GetOptions{})
framework.ExpectNoError(err, "getting snapshot class %s", d.SnapshotClass.FromExistingClassName)

if params, ok := snapshotClass.Object["parameters"].(map[string]interface{}); ok {
@ -390,10 +390,10 @@ func (d *driverDefinition) GetSnapshotClass(e2econfig *testsuites.PerTestConfig)
}
}

return testsuites.GetSnapshotClass(snapshotter, parameters, ns, suffix)
return storageapi.GetSnapshotClass(snapshotter, parameters, ns, suffix)
}

func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
func (d *driverDefinition) GetVolume(e2econfig *storageapi.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
if len(d.InlineVolumes) == 0 {
e2eskipper.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
}
@ -401,12 +401,12 @@ func (d *driverDefinition) GetVolume(e2econfig *testsuites.PerTestConfig, volume
return e2evolume.Attributes, e2evolume.Shared, e2evolume.ReadOnly
}

func (d *driverDefinition) GetCSIDriverName(e2econfig *testsuites.PerTestConfig) string {
func (d *driverDefinition) GetCSIDriverName(e2econfig *storageapi.PerTestConfig) string {
return d.DriverInfo.Name
}

func (d *driverDefinition) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
e2econfig := &testsuites.PerTestConfig{
func (d *driverDefinition) PrepareTest(f *framework.Framework) (*storageapi.PerTestConfig, func()) {
e2econfig := &storageapi.PerTestConfig{
Driver: d,
Prefix: "external",
Framework: f,
test/e2e/storage/external/external_test.go (vendored)
@ -23,12 +23,12 @@ import (

"k8s.io/apimachinery/pkg/util/sets"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
)

func TestDriverParameter(t *testing.T) {
expected := &driverDefinition{
DriverInfo: testsuites.DriverInfo{
DriverInfo: storageapi.DriverInfo{
Name: "foo.example.com",
SupportedFsType: sets.NewString(
"", // Default fsType
@ -28,6 +28,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -263,7 +264,7 @@ var _ = utils.SIGDescribe("HostPathType Character Device [Slow]", func() {
targetCharDev = path.Join(hostBaseDir, "achardev")
ginkgo.By("Create a character device for further testing")
cmd := fmt.Sprintf("mknod %s c 89 1", path.Join(mountBaseDir, "achardev"))
stdout, stderr, err := utils.PodExec(f, basePod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, basePod, cmd)
framework.ExpectNoError(err, "command: %q, stdout: %s\nstderr: %s", cmd, stdout, stderr)
})

@ -332,7 +333,7 @@ var _ = utils.SIGDescribe("HostPathType Block Device [Slow]", func() {
targetBlockDev = path.Join(hostBaseDir, "ablkdev")
ginkgo.By("Create a block device for further testing")
cmd := fmt.Sprintf("mknod %s b 89 1", path.Join(mountBaseDir, "ablkdev"))
stdout, stderr, err := utils.PodExec(f, basePod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, basePod, cmd)
framework.ExpectNoError(err, "command %q: stdout: %s\nstderr: %s", cmd, stdout, stderr)
})
@ -18,13 +18,14 @@ package storage

import (
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/storage/api"
"k8s.io/kubernetes/test/e2e/storage/drivers"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)

// List of testDrivers to be executed in below loop
var testDrivers = []func() testsuites.TestDriver{
var testDrivers = []func() api.TestDriver{
drivers.InitNFSDriver,
drivers.InitGlusterFSDriver,
drivers.InitISCSIDriver,
@ -54,8 +55,8 @@ var _ = utils.SIGDescribe("In-tree Volumes", func() {
for _, initDriver := range testDrivers {
curDriver := initDriver()

ginkgo.Context(testsuites.GetDriverNameWithFeatureTags(curDriver), func() {
testsuites.DefineTestSuites(curDriver, testsuites.BaseSuites)
ginkgo.Context(api.GetDriverNameWithFeatureTags(curDriver), func() {
api.DefineTestSuites(curDriver, testsuites.BaseSuites)
})
}
})
@ -45,6 +45,7 @@ import (
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -1077,7 +1078,7 @@ func testReadFileContent(f *framework.Framework, testFileDir string, testFile st
// Execute a read or write command in a pod.
// Fail on error
func podRWCmdExec(f *framework.Framework, pod *v1.Pod, cmd string) string {
stdout, stderr, err := utils.PodExec(f, pod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
framework.Logf("podRWCmdExec cmd: %q, out: %q, stderr: %q, err: %v", cmd, stdout, stderr, err)
framework.ExpectNoError(err)
return stdout
@ -1,27 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["testpattern.go"],
importpath = "k8s.io/kubernetes/test/e2e/storage/testpatterns",
visibility = ["//visibility:public"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//test/e2e/framework/volume:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
@ -1,11 +1,10 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = [
"base.go",
"disruptive.go",
"driveroperations.go",
"ephemeral.go",
"fsgroupchangepolicy.go",
"multivolume.go",
@ -13,7 +12,6 @@ go_library(
"snapshottable.go",
"snapshottable_stress.go",
"subpath.go",
"testdriver.go",
"topology.go",
"volume_expand.go",
"volume_io.go",
@ -36,14 +34,10 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/rand:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
@ -57,13 +51,11 @@ go_library(
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/podlogs:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
"//test/e2e/storage/api:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)
@ -81,16 +73,3 @@ filegroup(
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

go_test(
name = "go_default_test",
srcs = [
"api_test.go",
"base_test.go",
],
embed = [":go_default_library"],
deps = [
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/testpatterns:go_default_library",
],
)
@ -1,51 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package testsuites_test is used intentionally to ensure that the
// code below only has access to exported names. It doesn't have any
// actual test. That the custom volume test suite defined below
// compile is the test.
//
// It's needed because we don't have any in-tree volume test
// suite implementations that aren't in the "testuites" package itself.
// We don't need this for the "TestDriver" interface because there
// we have implementations in a separate package.
package testsuites_test

import (
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

type fakeSuite struct{}

func (f *fakeSuite) GetTestSuiteInfo() testsuites.TestSuiteInfo {
return testsuites.TestSuiteInfo{
Name: "fake",
FeatureTag: "",
TestPatterns: []testpatterns.TestPattern{testpatterns.DefaultFsDynamicPV},
SupportedSizeRange: e2evolume.SizeRange{Min: "1Mi", Max: "1Gi"},
}
}

func (f *fakeSuite) DefineTests(testsuites.TestDriver, testpatterns.TestPattern) {
}

func (f *fakeSuite) SkipUnsupportedTests(testsuites.TestDriver, testpatterns.TestPattern) {
}

var _ testsuites.TestSuite = &fakeSuite{}
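The compile-only check deleted above presumably moves along with the extracted package; a hedged sketch of the equivalent check written against the new api package follows, with the package path and symbol names assumed from the renames visible elsewhere in this diff rather than taken from this file:

// Sketch only: same compile-check idea, but against the extracted api package.
package api_test

import (
	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
)

type fakeSuite struct{}

func (f *fakeSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
	return storageapi.TestSuiteInfo{
		Name:               "fake",
		TestPatterns:       []storageapi.TestPattern{storageapi.DefaultFsDynamicPV},
		SupportedSizeRange: e2evolume.SizeRange{Min: "1Mi", Max: "1Gi"},
	}
}

func (f *fakeSuite) DefineTests(storageapi.TestDriver, storageapi.TestPattern)          {}
func (f *fakeSuite) SkipUnsupportedTests(storageapi.TestDriver, storageapi.TestPattern) {}

var _ storageapi.TestSuite = &fakeSuite{}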
@ -19,41 +19,20 @@ package testsuites
import (
"context"
"flag"
"fmt"
"math"
"regexp"
"strings"
"time"

"github.com/onsi/ginkgo"
"github.com/pkg/errors"

v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
csitrans "k8s.io/csi-translation-lib"
"k8s.io/kubernetes/test/e2e/framework"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
)

var (
migratedPlugins *string
minValidSize = "1Ki"
maxValidSize = "10Ei"
)
var migratedPlugins *string

func init() {
migratedPlugins = flag.String("storage.migratedPlugins", "", "comma separated list of in-tree plugin names of form 'kubernetes.io/{pluginName}' migrated to CSI")
@ -73,7 +52,7 @@ type migrationOpCheck struct {
}

// BaseSuites is a list of storage test suites that work for in-tree and CSI drivers
var BaseSuites = []func() TestSuite{
var BaseSuites = []func() storageapi.TestSuite{
InitVolumesTestSuite,
InitVolumeIOTestSuite,
InitVolumeModeTestSuite,
@ -95,598 +74,6 @@ var CSISuites = append(BaseSuites,
InitSnapshottableStressTestSuite,
)

// TestSuite represents an interface for a set of tests which works with TestDriver.
// Each testsuite should implement this interface.
// All the functions except GetTestSuiteInfo() should not be called directly. Instead,
// use RegisterTests() to register the tests in a more standard way.
type TestSuite interface {
GetTestSuiteInfo() TestSuiteInfo
// DefineTests defines tests of the testpattern for the driver.
// Called inside a Ginkgo context that reflects the current driver and test pattern,
// so the test suite can define tests directly with ginkgo.It.
DefineTests(TestDriver, testpatterns.TestPattern)
// SkipUnsupportedTests will skip the test suite based on the given TestPattern, TestDriver
// Testsuite should check if the given pattern and driver works for the "whole testsuite"
// Testcase specific check should happen inside defineTests
SkipUnsupportedTests(TestDriver, testpatterns.TestPattern)
}

// RegisterTests register the driver + pattern combination to the inside TestSuite
// This function actually register tests inside testsuite
func RegisterTests(suite TestSuite, driver TestDriver, pattern testpatterns.TestPattern) {
tsInfo := suite.GetTestSuiteInfo()
testName := fmt.Sprintf("[Testpattern: %s]%s %s%s", pattern.Name, pattern.FeatureTag, tsInfo.Name, tsInfo.FeatureTag)
ginkgo.Context(testName, func() {
ginkgo.BeforeEach(func() {
// skip all the invalid combination of driver and pattern
SkipInvalidDriverPatternCombination(driver, pattern)
// skip the unsupported test pattern and driver combination specific for this TestSuite
suite.SkipUnsupportedTests(driver, pattern)
})
// actually define the tests
// at this step the testsuite should not worry about if the pattern and driver
// does not fit for the whole testsuite. But driver&pattern check
// might still needed for specific independent test cases.
suite.DefineTests(driver, pattern)
})
}

// TestSuiteInfo represents a set of parameters for TestSuite
type TestSuiteInfo struct {
Name string // name of the TestSuite
FeatureTag string // featureTag for the TestSuite
TestPatterns []testpatterns.TestPattern // Slice of TestPattern for the TestSuite
SupportedSizeRange e2evolume.SizeRange // Size range supported by the test suite
}

// DefineTestSuites defines tests for all testpatterns and all testSuites for a driver
func DefineTestSuites(driver TestDriver, tsInits []func() TestSuite) {
for _, testSuiteInit := range tsInits {
suite := testSuiteInit()
for _, pattern := range suite.GetTestSuiteInfo().TestPatterns {
RegisterTests(suite, driver, pattern)
}
}
}

// SkipInvalidDriverPatternCombination will skip tests if the combination of driver, and testpattern
// is not compatible to be tested. This function will be called in the RegisterTests() to make
// sure all the testsuites we defined are valid.
//
// Whether it needs to be skipped is checked by following steps:
// 0. Check with driver SkipUnsupportedTest
// 1. Check if volType is supported by driver from its interface
// 2. Check if fsType is supported
//
// Test suites can also skip tests inside their own skipUnsupportedTests function or in
// individual tests.
func SkipInvalidDriverPatternCombination(driver TestDriver, pattern testpatterns.TestPattern) {
dInfo := driver.GetDriverInfo()
var isSupported bool

// 0. Check with driver specific logic
driver.SkipUnsupportedTest(pattern)

// 1. Check if Whether volType is supported by driver from its interface
switch pattern.VolType {
case testpatterns.InlineVolume:
_, isSupported = driver.(InlineVolumeTestDriver)
case testpatterns.PreprovisionedPV:
_, isSupported = driver.(PreprovisionedPVTestDriver)
case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume:
_, isSupported = driver.(DynamicPVTestDriver)
case testpatterns.CSIInlineVolume:
_, isSupported = driver.(EphemeralTestDriver)
default:
isSupported = false
}

if !isSupported {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
}

// 2. Check if fsType is supported
if !dInfo.SupportedFsType.Has(pattern.FsType) {
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.FsType)
}
if pattern.FsType == "xfs" && framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("Distro doesn't support xfs -- skipping")
}
if pattern.FsType == "ntfs" && !framework.NodeOSDistroIs("windows") {
e2eskipper.Skipf("Distro %s doesn't support ntfs -- skipping", framework.TestContext.NodeOSDistro)
}
}

// VolumeResource is a generic implementation of TestResource that wil be able to
|
||||
// be used in most of TestSuites.
|
||||
// See volume_io.go or volumes.go in test/e2e/storage/testsuites/ for how to use this resource.
|
||||
// Also, see subpath.go in the same directory for how to extend and use it.
|
||||
type VolumeResource struct {
|
||||
Config *PerTestConfig
|
||||
Pattern testpatterns.TestPattern
|
||||
VolSource *v1.VolumeSource
|
||||
Pvc *v1.PersistentVolumeClaim
|
||||
Pv *v1.PersistentVolume
|
||||
Sc *storagev1.StorageClass
|
||||
|
||||
Volume TestVolume
|
||||
}
|
||||
|
||||
// CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
|
||||
// different test pattern volume types.
|
||||
func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
|
||||
r := VolumeResource{
|
||||
Config: config,
|
||||
Pattern: pattern,
|
||||
}
|
||||
dInfo := driver.GetDriverInfo()
|
||||
f := config.Framework
|
||||
cs := f.ClientSet
|
||||
|
||||
// Create volume for pre-provisioned volume tests
|
||||
r.Volume = CreateVolume(driver, config, pattern.VolType)
|
||||
|
||||
switch pattern.VolType {
|
||||
case testpatterns.InlineVolume:
|
||||
framework.Logf("Creating resource for inline volume")
|
||||
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
|
||||
r.VolSource = iDriver.GetVolumeSource(false, pattern.FsType, r.Volume)
|
||||
}
|
||||
case testpatterns.PreprovisionedPV:
|
||||
framework.Logf("Creating resource for pre-provisioned PV")
|
||||
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
|
||||
pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume)
|
||||
if pvSource != nil {
|
||||
r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
|
||||
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
|
||||
}
|
||||
}
|
||||
case testpatterns.DynamicPV, testpatterns.GenericEphemeralVolume:
|
||||
framework.Logf("Creating resource for dynamic PV")
|
||||
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
|
||||
var err error
|
||||
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
|
||||
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.Logf("Using claimSize:%s, test suite supported size:%v, driver(%s) supported size:%v ",
|
||||
claimSize, testVolumeSizeRange, dDriver.GetDriverInfo().Name, driverVolumeSizeRange)
|
||||
r.Sc = dDriver.GetDynamicProvisionStorageClass(r.Config, pattern.FsType)
|
||||
|
||||
if pattern.BindingMode != "" {
|
||||
r.Sc.VolumeBindingMode = &pattern.BindingMode
|
||||
}
|
||||
r.Sc.AllowVolumeExpansion = &pattern.AllowExpansion
|
||||
|
||||
ginkgo.By("creating a StorageClass " + r.Sc.Name)
|
||||
|
||||
r.Sc, err = cs.StorageV1().StorageClasses().Create(context.TODO(), r.Sc, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
switch pattern.VolType {
|
||||
case testpatterns.DynamicPV:
|
||||
r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC(
|
||||
f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes)
|
||||
r.VolSource = createVolumeSource(r.Pvc.Name, false /* readOnly */)
|
||||
case testpatterns.GenericEphemeralVolume:
|
||||
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
|
||||
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
|
||||
r.VolSource = createEphemeralVolumeSource(r.Sc.Name, dInfo.RequiredAccessModes, claimSize, false /* readOnly */)
|
||||
}
|
||||
}
|
||||
case testpatterns.CSIInlineVolume:
|
||||
framework.Logf("Creating resource for CSI ephemeral inline volume")
|
||||
if eDriver, ok := driver.(EphemeralTestDriver); ok {
|
||||
attributes, _, _ := eDriver.GetVolume(config, 0)
|
||||
r.VolSource = &v1.VolumeSource{
|
||||
CSI: &v1.CSIVolumeSource{
|
||||
Driver: eDriver.GetCSIDriverName(config),
|
||||
VolumeAttributes: attributes,
|
||||
},
|
||||
}
|
||||
}
|
||||
default:
|
||||
framework.Failf("VolumeResource doesn't support: %s", pattern.VolType)
|
||||
}
|
||||
|
||||
if r.VolSource == nil {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
|
||||
}
|
||||
|
||||
return &r
|
||||
}
|
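As a rough usage sketch (assuming driver, config and pattern come from driver.PrepareTest and the suite's test patterns, as in the suites further below), a test typically creates the resource during setup and releases it with CleanupResource:

// Sketch: acquire a volume matching the current pattern and clean it up afterwards.
// testVolumeSizeRange would normally come from TestSuiteInfo.SupportedSizeRange.
resource := CreateVolumeResource(driver, config, pattern, testVolumeSizeRange)
defer func() {
	if err := resource.CleanupResource(); err != nil {
		framework.Failf("while cleaning up volume resource: %v", err)
	}
}()
// resource.VolSource, resource.Pvc, resource.Pv and resource.Sc are populated
// as appropriate for pattern.VolType.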
||||
|
||||
func createVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
|
||||
return &v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: pvcName,
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createEphemeralVolumeSource(scName string, accessModes []v1.PersistentVolumeAccessMode, claimSize string, readOnly bool) *v1.VolumeSource {
|
||||
if len(accessModes) == 0 {
|
||||
accessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
|
||||
}
|
||||
return &v1.VolumeSource{
|
||||
Ephemeral: &v1.EphemeralVolumeSource{
|
||||
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
StorageClassName: &scName,
|
||||
AccessModes: accessModes,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: resource.MustParse(claimSize),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
ReadOnly: readOnly,
|
||||
},
|
||||
}
|
||||
}
|
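For context, a rough sketch of how the returned VolumeSource could be attached to a pod spec (the pod variable and the "standard" StorageClass name are purely illustrative):

// Sketch: wire a generic ephemeral volume into a pod. Passing nil access modes
// falls back to ReadWriteOnce, as implemented above.
volSrc := createEphemeralVolumeSource("standard", nil, "1Gi", false /* readOnly */)
pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
	Name:         "test-volume",
	VolumeSource: *volSrc,
})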
||||
|
||||
// CleanupResource cleans up VolumeResource
|
||||
func (r *VolumeResource) CleanupResource() error {
|
||||
f := r.Config.Framework
|
||||
var cleanUpErrs []error
|
||||
if r.Pvc != nil || r.Pv != nil {
|
||||
switch r.Pattern.VolType {
|
||||
case testpatterns.PreprovisionedPV:
|
||||
ginkgo.By("Deleting pv and pvc")
|
||||
if errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.Pv, r.Pvc); len(errs) != 0 {
|
||||
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
|
||||
}
|
||||
case testpatterns.DynamicPV:
|
||||
ginkgo.By("Deleting pvc")
|
||||
// We only delete the PVC so that the PV (and disk) can be cleaned up by the dynamic provisioner
|
||||
if r.Pv != nil && r.Pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete {
|
||||
framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v",
|
||||
r.Pv.Name, v1.PersistentVolumeReclaimDelete)
|
||||
}
|
||||
if r.Pvc != nil {
|
||||
cs := f.ClientSet
|
||||
pv := r.Pv
|
||||
if pv == nil && r.Pvc.Name != "" {
|
||||
// This happens for late binding. Check whether we have a volume now that we need to wait for.
|
||||
pvc, err := cs.CoreV1().PersistentVolumeClaims(r.Pvc.Namespace).Get(context.TODO(), r.Pvc.Name, metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
if pvc.Spec.VolumeName != "" {
|
||||
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PV %v", pvc.Spec.VolumeName))
|
||||
}
|
||||
}
|
||||
case apierrors.IsNotFound(err):
|
||||
// Without the PVC, we cannot locate the corresponding PV. Let's
|
||||
// hope that it is gone.
|
||||
default:
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to find PVC %v", r.Pvc.Name))
|
||||
}
|
||||
}
|
||||
|
||||
err := e2epv.DeletePersistentVolumeClaim(f.ClientSet, r.Pvc.Name, f.Namespace.Name)
|
||||
if err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete PVC %v", r.Pvc.Name))
|
||||
}
|
||||
|
||||
if pv != nil {
|
||||
err = e2epv.WaitForPersistentVolumeDeleted(f.ClientSet, pv.Name, 5*time.Second, 5*time.Minute)
|
||||
if err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err,
|
||||
"Persistent Volume %v not deleted by dynamic provisioner", pv.Name))
|
||||
}
|
||||
}
|
||||
}
|
||||
default:
|
||||
framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.Pvc, r.Pv)
|
||||
}
|
||||
}
|
||||
|
||||
if r.Sc != nil {
|
||||
ginkgo.By("Deleting sc")
|
||||
if err := deleteStorageClass(f.ClientSet, r.Sc.Name); err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrapf(err, "Failed to delete StorageClass %v", r.Sc.Name))
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup volume for pre-provisioned volume tests
|
||||
if r.Volume != nil {
|
||||
if err := tryFunc(r.Volume.DeleteVolume); err != nil {
|
||||
cleanUpErrs = append(cleanUpErrs, errors.Wrap(err, "Failed to delete Volume"))
|
||||
}
|
||||
}
|
||||
return utilerrors.NewAggregate(cleanUpErrs)
|
||||
}
|
||||
|
||||
func createPVCPV(
|
||||
f *framework.Framework,
|
||||
name string,
|
||||
pvSource *v1.PersistentVolumeSource,
|
||||
volumeNodeAffinity *v1.VolumeNodeAffinity,
|
||||
volMode v1.PersistentVolumeMode,
|
||||
accessModes []v1.PersistentVolumeAccessMode,
|
||||
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
|
||||
pvConfig := e2epv.PersistentVolumeConfig{
|
||||
NamePrefix: fmt.Sprintf("%s-", name),
|
||||
StorageClassName: f.Namespace.Name,
|
||||
PVSource: *pvSource,
|
||||
NodeAffinity: volumeNodeAffinity,
|
||||
AccessModes: accessModes,
|
||||
}
|
||||
|
||||
pvcConfig := e2epv.PersistentVolumeClaimConfig{
|
||||
StorageClassName: &f.Namespace.Name,
|
||||
AccessModes: accessModes,
|
||||
}
|
||||
|
||||
if volMode != "" {
|
||||
pvConfig.VolumeMode = &volMode
|
||||
pvcConfig.VolumeMode = &volMode
|
||||
}
|
||||
|
||||
framework.Logf("Creating PVC and PV")
|
||||
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
|
||||
framework.ExpectNoError(err, "PVC, PV creation failed")
|
||||
|
||||
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc)
|
||||
framework.ExpectNoError(err, "PVC, PV failed to bind")
|
||||
|
||||
return pv, pvc
|
||||
}
|
||||
|
||||
func createPVCPVFromDynamicProvisionSC(
|
||||
f *framework.Framework,
|
||||
name string,
|
||||
claimSize string,
|
||||
sc *storagev1.StorageClass,
|
||||
volMode v1.PersistentVolumeMode,
|
||||
accessModes []v1.PersistentVolumeAccessMode,
|
||||
) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
|
||||
cs := f.ClientSet
|
||||
ns := f.Namespace.Name
|
||||
|
||||
ginkgo.By("creating a claim")
|
||||
pvcCfg := e2epv.PersistentVolumeClaimConfig{
|
||||
NamePrefix: name,
|
||||
ClaimSize: claimSize,
|
||||
StorageClassName: &(sc.Name),
|
||||
AccessModes: accessModes,
|
||||
VolumeMode: &volMode,
|
||||
}
|
||||
|
||||
pvc := e2epv.MakePersistentVolumeClaim(pvcCfg, ns)
|
||||
|
||||
var err error
|
||||
pvc, err = e2epv.CreatePVC(cs, ns, pvc)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
if !isDelayedBinding(sc) {
|
||||
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
var pv *v1.PersistentVolume
|
||||
if !isDelayedBinding(sc) {
|
||||
pv, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
|
||||
return pv, pvc
|
||||
}
|
||||
|
||||
func isDelayedBinding(sc *storagev1.StorageClass) bool {
|
||||
if sc.VolumeBindingMode != nil {
|
||||
return *sc.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
|
||||
func deleteStorageClass(cs clientset.Interface, className string) error {
|
||||
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertTestConfig returns a framework test config with the
|
||||
// parameters specified for the testsuite or (if available) the
|
||||
// dynamically created config for the volume server.
|
||||
//
|
||||
// This is done because TestConfig is the public API for
|
||||
// the testsuites package whereas volume.TestConfig is merely
|
||||
// an implementation detail. It contains fields that have no effect,
|
||||
// which makes it unsuitable for use in the testsuites public API.
|
||||
func convertTestConfig(in *PerTestConfig) e2evolume.TestConfig {
|
||||
if in.ServerConfig != nil {
|
||||
return *in.ServerConfig
|
||||
}
|
||||
|
||||
return e2evolume.TestConfig{
|
||||
Namespace: in.Framework.Namespace.Name,
|
||||
Prefix: in.Prefix,
|
||||
ClientNodeSelection: in.ClientNodeSelection,
|
||||
}
|
||||
}
|
||||
|
||||
// getSizeRangesIntersection takes two instances of storage size ranges and determines the
|
||||
// intersection of the intervals (if it exists) and returns the minimum of the intersection
|
||||
// to be used as the claim size for the test.
|
||||
// If a value is not set, there is no minimum or maximum size limitation and a default size is used for it.
|
||||
func getSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
|
||||
var firstMin, firstMax, secondMin, secondMax resource.Quantity
|
||||
var err error
|
||||
|
||||
// If a SizeRange bound is not set, assign a default minimum or maximum size
|
||||
if len(first.Min) == 0 {
|
||||
first.Min = minValidSize
|
||||
}
|
||||
if len(first.Max) == 0 {
|
||||
first.Max = maxValidSize
|
||||
}
|
||||
if len(second.Min) == 0 {
|
||||
second.Min = minValidSize
|
||||
}
|
||||
if len(second.Max) == 0 {
|
||||
second.Max = maxValidSize
|
||||
}
|
||||
|
||||
if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
intersectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
|
||||
intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))
|
||||
|
||||
// the minimum of the intersection shall be returned as the claim size
|
||||
var intersectionMin resource.Quantity
|
||||
|
||||
if intersectionEnd-intersectionStart >= 0 { // have intersection
|
||||
intersectionMin = *resource.NewQuantity(int64(intersectionStart), "BinarySI") // convert value to BinarySI format, e.g. 5Gi
|
||||
// return the minimum of the intersection as the claim size
|
||||
return intersectionMin.String(), nil
|
||||
}
|
||||
return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
|
||||
}
|
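A concrete example of the behaviour with illustrative values: if the test suite asks for {Min: "1Gi", Max: "5Ti"} and the driver reports {Min: "5Gi", Max: "10Ti"}, the intersection is [5Gi, 5Ti] and the returned claim size is its lower bound:

// Sketch: the claim size is the minimum of the overlap of both ranges.
claimSize, err := getSizeRangesIntersection(
	e2evolume.SizeRange{Min: "1Gi", Max: "5Ti"},  // from the test suite
	e2evolume.SizeRange{Min: "5Gi", Max: "10Ti"}, // from the driver
)
// claimSize == "5Gi", err == nil.
// Disjoint ranges (e.g. Max "1Gi" vs. Min "5Gi") return an error instead.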
||||
|
||||
func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.Unstructured {
|
||||
snapshot := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "VolumeSnapshot",
|
||||
"apiVersion": snapshotAPIVersion,
|
||||
"metadata": map[string]interface{}{
|
||||
"generateName": "snapshot-",
|
||||
"namespace": ns,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"volumeSnapshotClassName": snapshotClassName,
|
||||
"source": map[string]interface{}{
|
||||
"persistentVolumeClaimName": claimName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return snapshot
|
||||
}
|
||||
func getPreProvisionedSnapshot(snapName, ns, snapshotContentName string) *unstructured.Unstructured {
|
||||
snapshot := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "VolumeSnapshot",
|
||||
"apiVersion": snapshotAPIVersion,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": snapName,
|
||||
"namespace": ns,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"source": map[string]interface{}{
|
||||
"volumeSnapshotContentName": snapshotContentName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return snapshot
|
||||
}
|
||||
func getPreProvisionedSnapshotContent(snapcontentName, snapshotName, snapshotNamespace, snapshotHandle, deletionPolicy, csiDriverName string) *unstructured.Unstructured {
|
||||
snapshotContent := &unstructured.Unstructured{
|
||||
Object: map[string]interface{}{
|
||||
"kind": "VolumeSnapshotContent",
|
||||
"apiVersion": snapshotAPIVersion,
|
||||
"metadata": map[string]interface{}{
|
||||
"name": snapcontentName,
|
||||
},
|
||||
"spec": map[string]interface{}{
|
||||
"source": map[string]interface{}{
|
||||
"snapshotHandle": snapshotHandle,
|
||||
},
|
||||
"volumeSnapshotRef": map[string]interface{}{
|
||||
"name": snapshotName,
|
||||
"namespace": snapshotNamespace,
|
||||
},
|
||||
"driver": csiDriverName,
|
||||
"deletionPolicy": deletionPolicy,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return snapshotContent
|
||||
}
|
||||
|
||||
func getPreProvisionedSnapshotContentName(uuid types.UID) string {
|
||||
return fmt.Sprintf("pre-provisioned-snapcontent-%s", string(uuid))
|
||||
}
|
||||
|
||||
func getPreProvisionedSnapshotName(uuid types.UID) string {
|
||||
return fmt.Sprintf("pre-provisioned-snapshot-%s", string(uuid))
|
||||
}
|
||||
|
||||
// StartPodLogs begins capturing log output and events from current
|
||||
// and future pods running in the namespace of the framework. That
|
||||
// ends when the returned cleanup function is called.
|
||||
//
|
||||
// The output goes to log files (when using --report-dir, as in the
|
||||
// CI) or the output stream (otherwise).
|
||||
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
cs := f.ClientSet
|
||||
|
||||
ns := driverNamespace.Name
|
||||
|
||||
to := podlogs.LogOutput{
|
||||
StatusWriter: ginkgo.GinkgoWriter,
|
||||
}
|
||||
if framework.TestContext.ReportDir == "" {
|
||||
to.LogWriter = ginkgo.GinkgoWriter
|
||||
} else {
|
||||
test := ginkgo.CurrentGinkgoTestDescription()
|
||||
// Clean up each individual component text such that
|
||||
// it contains only characters that are valid in a
|
||||
// file name.
|
||||
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
|
||||
var components []string
|
||||
for _, component := range test.ComponentTexts {
|
||||
components = append(components, reg.ReplaceAllString(component, "_"))
|
||||
}
|
||||
// We end the prefix with a slash to ensure that all logs
|
||||
// end up in a directory named after the current test.
|
||||
//
|
||||
// Each component name maps to a directory. This
|
||||
// avoids cluttering the root artifact directory and
|
||||
// keeps each directory name smaller (the full test
|
||||
// name at one point exceeded 256 characters, which was
|
||||
// too much for some filesystems).
|
||||
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
|
||||
strings.Join(components, "/") + "/"
|
||||
}
|
||||
podlogs.CopyAllLogs(ctx, cs, ns, to)
|
||||
|
||||
// pod events are something that the framework already collects itself
|
||||
// after a failed test. Logging them live is only useful for interactive
|
||||
// debugging, not when we collect reports.
|
||||
if framework.TestContext.ReportDir == "" {
|
||||
podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
|
||||
}
|
||||
|
||||
return cancel
|
||||
}
|
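A minimal sketch of the intended call pattern (assuming a driver namespace is already available, e.g. from the per-test config):

// Sketch: capture logs and events for pods in the driver namespace and stop
// capturing once the test is done.
cancelLogging := StartPodLogs(f, driverNamespace)
defer cancelLogging()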
||||
|
||||
func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) opCounts {
|
||||
totOps := opCounts{}
|
||||
|
||||
@ -838,23 +225,9 @@ func (moc *migrationOpCheck) validateMigrationVolumeOpCounts() {
|
||||
}
|
||||
|
||||
// skipVolTypePatterns skips the pattern if its volume type is in skipVolTypes and the driver supports dynamic provisioning
|
||||
func skipVolTypePatterns(pattern testpatterns.TestPattern, driver TestDriver, skipVolTypes map[testpatterns.TestVolType]bool) {
|
||||
_, supportsProvisioning := driver.(DynamicPVTestDriver)
|
||||
func skipVolTypePatterns(pattern storageapi.TestPattern, driver storageapi.TestDriver, skipVolTypes map[storageapi.TestVolType]bool) {
|
||||
_, supportsProvisioning := driver.(storageapi.DynamicPVTestDriver)
|
||||
if supportsProvisioning && skipVolTypes[pattern.VolType] {
|
||||
e2eskipper.Skipf("Driver supports dynamic provisioning, skipping %s pattern", pattern.VolType)
|
||||
}
|
||||
}
|
||||
|
||||
func tryFunc(f func()) error {
|
||||
var err error
|
||||
if f == nil {
|
||||
return nil
|
||||
}
|
||||
defer func() {
|
||||
if recoverError := recover(); recoverError != nil {
|
||||
err = fmt.Errorf("%v", recoverError)
|
||||
}
|
||||
}()
|
||||
f()
|
||||
return err
|
||||
}
|
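tryFunc exists so that cleanup callbacks which fail through framework assertions (and therefore panic) can be folded into an aggregated error instead of aborting the remaining cleanup, which is how the suites below use it:

// Sketch: run a cleanup callback that may panic and record the failure
// instead of letting it short-circuit the rest of the cleanup.
var cleanUpErrs []error
cleanUpErrs = append(cleanUpErrs, tryFunc(driverCleanup)) // driverCleanup comes from driver.PrepareTest
framework.ExpectNoError(utilerrors.NewAggregate(cleanUpErrs), "while cleaning up")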
||||
|
@ -26,19 +26,20 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type disruptiveTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomDisruptiveTestSuite returns disruptiveTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomDisruptiveTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomDisruptiveTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &disruptiveTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "disruptive",
|
||||
FeatureTag: "[Disruptive][LinuxOnly]",
|
||||
TestPatterns: patterns,
|
||||
@ -48,43 +49,43 @@ func InitCustomDisruptiveTestSuite(patterns []testpatterns.TestPattern) TestSuit
|
||||
|
||||
// InitDisruptiveTestSuite returns disruptiveTestSuite that implements TestSuite interface
|
||||
// using test suite default patterns
|
||||
func InitDisruptiveTestSuite() TestSuite {
|
||||
testPatterns := []testpatterns.TestPattern{
|
||||
func InitDisruptiveTestSuite() storageapi.TestSuite {
|
||||
testPatterns := []storageapi.TestPattern{
|
||||
// FSVolMode is already covered in subpath testsuite
|
||||
testpatterns.DefaultFsInlineVolume,
|
||||
testpatterns.FsVolModePreprovisionedPV,
|
||||
testpatterns.FsVolModeDynamicPV,
|
||||
testpatterns.BlockVolModePreprovisionedPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
storageapi.DefaultFsInlineVolume,
|
||||
storageapi.FsVolModePreprovisionedPV,
|
||||
storageapi.FsVolModeDynamicPV,
|
||||
storageapi.BlockVolModePreprovisionedPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
}
|
||||
return InitCustomDisruptiveTestSuite(testPatterns)
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (s *disruptiveTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return s.tsInfo
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.PreprovisionedPV))
|
||||
func (s *disruptiveTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(storageapi.PreprovisionedPV))
|
||||
}
|
||||
|
||||
func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (s *disruptiveTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
cs clientset.Interface
|
||||
ns *v1.Namespace
|
||||
|
||||
// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
|
||||
resource *VolumeResource
|
||||
resource *storageapi.VolumeResource
|
||||
pod *v1.Pod
|
||||
}
|
||||
var l local
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("disruptive", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -95,7 +96,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
|
||||
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
@ -113,7 +114,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
l.resource = nil
|
||||
}
|
||||
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
|
||||
}
|
||||
@ -153,7 +154,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
var err error
|
||||
var pvcs []*v1.PersistentVolumeClaim
|
||||
var inlineSources []*v1.VolumeSource
|
||||
if pattern.VolType == testpatterns.InlineVolume {
|
||||
if pattern.VolType == storageapi.InlineVolume {
|
||||
inlineSources = append(inlineSources, l.resource.VolSource)
|
||||
} else {
|
||||
pvcs = append(pvcs, l.resource.Pvc)
|
||||
|
@ -34,19 +34,19 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type ephemeralTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomEphemeralTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomEphemeralTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &ephemeralTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "ephemeral",
|
||||
TestPatterns: patterns,
|
||||
},
|
||||
@ -55,17 +55,17 @@ func InitCustomEphemeralTestSuite(patterns []testpatterns.TestPattern) TestSuite
|
||||
|
||||
// InitEphemeralTestSuite returns ephemeralTestSuite that implements TestSuite interface
|
||||
// using test suite default patterns
|
||||
func InitEphemeralTestSuite() TestSuite {
|
||||
genericLateBinding := testpatterns.DefaultFsGenericEphemeralVolume
|
||||
func InitEphemeralTestSuite() storageapi.TestSuite {
|
||||
genericLateBinding := storageapi.DefaultFsGenericEphemeralVolume
|
||||
genericLateBinding.Name += " (late-binding)"
|
||||
genericLateBinding.BindingMode = storagev1.VolumeBindingWaitForFirstConsumer
|
||||
|
||||
genericImmediateBinding := testpatterns.DefaultFsGenericEphemeralVolume
|
||||
genericImmediateBinding := storageapi.DefaultFsGenericEphemeralVolume
|
||||
genericImmediateBinding.Name += " (immediate-binding)"
|
||||
genericImmediateBinding.BindingMode = storagev1.VolumeBindingImmediate
|
||||
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsCSIEphemeralVolume,
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DefaultFsCSIEphemeralVolume,
|
||||
genericLateBinding,
|
||||
genericImmediateBinding,
|
||||
}
|
||||
@ -73,35 +73,35 @@ func InitEphemeralTestSuite() TestSuite {
|
||||
return InitCustomEphemeralTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (p *ephemeralTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (p *ephemeralTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return p.tsInfo
|
||||
}
|
||||
|
||||
func (p *ephemeralTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
}
|
||||
|
||||
func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (p *ephemeralTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
testCase *EphemeralTest
|
||||
resource *VolumeResource
|
||||
resource *storageapi.VolumeResource
|
||||
}
|
||||
var (
|
||||
eDriver EphemeralTestDriver
|
||||
eDriver storageapi.EphemeralTestDriver
|
||||
l local
|
||||
)
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("ephemeral", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
if pattern.VolType == testpatterns.CSIInlineVolume {
|
||||
eDriver, _ = driver.(EphemeralTestDriver)
|
||||
if pattern.VolType == storageapi.CSIInlineVolume {
|
||||
eDriver, _ = driver.(storageapi.EphemeralTestDriver)
|
||||
}
|
||||
if pattern.VolType == testpatterns.GenericEphemeralVolume {
|
||||
if pattern.VolType == storageapi.GenericEphemeralVolume {
|
||||
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
|
||||
if !enabled {
|
||||
@ -113,10 +113,10 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
||||
|
||||
// Now do the more expensive test initialization.
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
l.resource = CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{})
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{})
|
||||
|
||||
switch pattern.VolType {
|
||||
case testpatterns.CSIInlineVolume:
|
||||
case storageapi.CSIInlineVolume:
|
||||
l.testCase = &EphemeralTest{
|
||||
Client: l.config.Framework.ClientSet,
|
||||
Timeouts: f.Timeouts,
|
||||
@ -127,7 +127,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
||||
return eDriver.GetVolume(l.config, volumeNumber)
|
||||
},
|
||||
}
|
||||
case testpatterns.GenericEphemeralVolume:
|
||||
case storageapi.GenericEphemeralVolume:
|
||||
l.testCase = &EphemeralTest{
|
||||
Client: l.config.Framework.ClientSet,
|
||||
Timeouts: f.Timeouts,
|
||||
@ -141,7 +141,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
||||
cleanup := func() {
|
||||
var cleanUpErrs []error
|
||||
cleanUpErrs = append(cleanUpErrs, l.resource.CleanupResource())
|
||||
cleanUpErrs = append(cleanUpErrs, tryFunc(l.driverCleanup))
|
||||
cleanUpErrs = append(cleanUpErrs, storageutils.TryFunc(l.driverCleanup))
|
||||
err := utilerrors.NewAggregate(cleanUpErrs)
|
||||
framework.ExpectNoError(err, "while cleaning up")
|
||||
}
|
||||
@ -152,7 +152,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
||||
|
||||
l.testCase.ReadOnly = true
|
||||
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
|
||||
storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep ro,")
|
||||
e2evolume.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep ro,")
|
||||
return nil
|
||||
}
|
||||
l.testCase.TestEphemeral()
|
||||
@ -164,7 +164,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
||||
|
||||
l.testCase.ReadOnly = false
|
||||
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
|
||||
storageutils.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep rw,")
|
||||
e2evolume.VerifyExecInPodSucceed(f, pod, "mount | grep /mnt/test | grep rw,")
|
||||
return nil
|
||||
}
|
||||
l.testCase.TestEphemeral()
|
||||
@ -197,8 +197,8 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
||||
// visible in the other.
|
||||
if !readOnly && !shared {
|
||||
ginkgo.By("writing data in one pod and checking for it in the second")
|
||||
storageutils.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
|
||||
storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
|
||||
e2evolume.VerifyExecInPodSucceed(f, pod, "touch /mnt/test-0/hello-world")
|
||||
e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
|
||||
}
|
||||
|
||||
defer StopPodAndDependents(f.ClientSet, f.Timeouts, pod2)
|
||||
@ -210,7 +210,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
|
||||
|
||||
ginkgo.It("should support multiple inline ephemeral volumes", func() {
|
||||
if pattern.BindingMode == storagev1.VolumeBindingImmediate &&
|
||||
pattern.VolType == testpatterns.GenericEphemeralVolume {
|
||||
pattern.VolType == storageapi.GenericEphemeralVolume {
|
||||
e2eskipper.Skipf("Multiple generic ephemeral volumes with immediate binding may cause pod startup failures when the volumes get created in separate topology segments.")
|
||||
}
|
||||
|
||||
|
@ -27,7 +27,7 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
@ -42,15 +42,15 @@ const (
|
||||
)
|
||||
|
||||
type fsGroupChangePolicyTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
var _ TestSuite = &fsGroupChangePolicyTestSuite{}
|
||||
var _ storageapi.TestSuite = &fsGroupChangePolicyTestSuite{}
|
||||
|
||||
// InitCustomFsGroupChangePolicyTestSuite returns fsGroupChangePolicyTestSuite that implements TestSuite interface
|
||||
func InitCustomFsGroupChangePolicyTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomFsGroupChangePolicyTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &fsGroupChangePolicyTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "fsgroupchangepolicy",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -61,21 +61,21 @@ func InitCustomFsGroupChangePolicyTestSuite(patterns []testpatterns.TestPattern)
|
||||
}
|
||||
|
||||
// InitFsGroupChangePolicyTestSuite returns fsGroupChangePolicyTestSuite that implements TestSuite interface
|
||||
func InitFsGroupChangePolicyTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
func InitFsGroupChangePolicyTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DefaultFsDynamicPV,
|
||||
}
|
||||
return InitCustomFsGroupChangePolicyTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (s *fsGroupChangePolicyTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (s *fsGroupChangePolicyTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return s.tsInfo
|
||||
}
|
||||
|
||||
func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.CSIInlineVolume, testpatterns.GenericEphemeralVolume))
|
||||
func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(storageapi.CSIInlineVolume, storageapi.GenericEphemeralVolume))
|
||||
dInfo := driver.GetDriverInfo()
|
||||
if !dInfo.Capabilities[CapFsGroup] {
|
||||
if !dInfo.Capabilities[storageapi.CapFsGroup] {
|
||||
e2eskipper.Skipf("Driver %q does not support FsGroup - skipping", dInfo.Name)
|
||||
}
|
||||
|
||||
@ -83,28 +83,28 @@ func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver TestDriver, p
|
||||
e2eskipper.Skipf("Test does not support non-filesystem volume mode - skipping")
|
||||
}
|
||||
|
||||
if pattern.VolType != testpatterns.DynamicPV {
|
||||
if pattern.VolType != storageapi.DynamicPV {
|
||||
e2eskipper.Skipf("Suite %q does not support %v", s.tsInfo.Name, pattern.VolType)
|
||||
}
|
||||
|
||||
_, ok := driver.(DynamicPVTestDriver)
|
||||
_, ok := driver.(storageapi.DynamicPVTestDriver)
|
||||
if !ok {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
driver TestDriver
|
||||
resource *VolumeResource
|
||||
driver storageapi.TestDriver
|
||||
resource *storageapi.VolumeResource
|
||||
}
|
||||
var l local
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
@ -112,7 +112,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
l.driver = driver
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.resource = CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
@ -125,7 +125,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
}
|
||||
|
||||
if l.driverCleanup != nil {
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
}
|
||||
|
||||
@ -254,15 +254,15 @@ func createPodAndVerifyContentGid(f *framework.Framework, podConfig *e2epod.Conf
|
||||
ginkgo.By(fmt.Sprintf("Creating a sub-directory and file, and verifying their ownership is %s", podFsGroup))
|
||||
cmd := fmt.Sprintf("touch %s", rootDirFilePath)
|
||||
var err error
|
||||
_, _, err = storageutils.PodExec(f, pod, cmd)
|
||||
_, _, err = e2evolume.PodExec(f, pod, cmd)
|
||||
framework.ExpectNoError(err)
|
||||
storageutils.VerifyFilePathGidInPod(f, rootDirFilePath, podFsGroup, pod)
|
||||
|
||||
cmd = fmt.Sprintf("mkdir %s", subdir)
|
||||
_, _, err = storageutils.PodExec(f, pod, cmd)
|
||||
_, _, err = e2evolume.PodExec(f, pod, cmd)
|
||||
framework.ExpectNoError(err)
|
||||
cmd = fmt.Sprintf("touch %s", subDirFilePath)
|
||||
_, _, err = storageutils.PodExec(f, pod, cmd)
|
||||
_, _, err = e2evolume.PodExec(f, pod, cmd)
|
||||
framework.ExpectNoError(err)
|
||||
storageutils.VerifyFilePathGidInPod(f, subDirFilePath, podFsGroup, pod)
|
||||
return pod
|
||||
|
@ -32,22 +32,23 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
type multiVolumeTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
var _ TestSuite = &multiVolumeTestSuite{}
|
||||
var _ storageapi.TestSuite = &multiVolumeTestSuite{}
|
||||
|
||||
// InitCustomMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomMultiVolumeTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomMultiVolumeTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &multiVolumeTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "multiVolume [Slow]",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -59,37 +60,37 @@ func InitCustomMultiVolumeTestSuite(patterns []testpatterns.TestPattern) TestSui
|
||||
|
||||
// InitMultiVolumeTestSuite returns multiVolumeTestSuite that implements TestSuite interface
|
||||
// using test suite default patterns
|
||||
func InitMultiVolumeTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.FsVolModePreprovisionedPV,
|
||||
testpatterns.FsVolModeDynamicPV,
|
||||
testpatterns.BlockVolModePreprovisionedPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
func InitMultiVolumeTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.FsVolModePreprovisionedPV,
|
||||
storageapi.FsVolModeDynamicPV,
|
||||
storageapi.BlockVolModePreprovisionedPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
}
|
||||
return InitCustomMultiVolumeTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *multiVolumeTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *multiVolumeTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *multiVolumeTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *multiVolumeTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(testpatterns.PreprovisionedPV))
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
|
||||
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(storageapi.PreprovisionedPV))
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageapi.CapBlock] {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *multiVolumeTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
cs clientset.Interface
|
||||
ns *v1.Namespace
|
||||
driver TestDriver
|
||||
resources []*VolumeResource
|
||||
driver storageapi.TestDriver
|
||||
resources []*storageapi.VolumeResource
|
||||
|
||||
migrationCheck *migrationOpCheck
|
||||
}
|
||||
@ -100,7 +101,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("multivolume", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("multivolume", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -119,7 +120,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
errs = append(errs, resource.CleanupResource())
|
||||
}
|
||||
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleanup resource")
|
||||
l.migrationCheck.validateMigrationVolumeOpCounts()
|
||||
@ -134,7 +135,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
// Currently, multiple volumes are not generally available for pre-provisioned volumes,
|
||||
// because containerized storage servers, such as iSCSI and rbd, just return
|
||||
// a static volume inside the container instead of creating a new volume per request.
|
||||
if pattern.VolType == testpatterns.PreprovisionedPV {
|
||||
if pattern.VolType == storageapi.PreprovisionedPV {
|
||||
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
|
||||
}
|
||||
|
||||
@ -146,7 +147,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
|
||||
for i := 0; i < numVols; i++ {
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
resource := storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resources = append(l.resources, resource)
|
||||
pvcs = append(pvcs, resource.Pvc)
|
||||
}
|
||||
@ -164,7 +165,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
// Currently, multiple volumes are not generally available for pre-provisioned volumes,
|
||||
// because containerized storage servers, such as iSCSI and rbd, just return
|
||||
// a static volume inside the container instead of creating a new volume per request.
|
||||
if pattern.VolType == testpatterns.PreprovisionedPV {
|
||||
if pattern.VolType == storageapi.PreprovisionedPV {
|
||||
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
|
||||
}
|
||||
|
||||
@ -172,8 +173,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
defer cleanup()
|
||||
|
||||
// Check different-node test requirement
|
||||
if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
|
||||
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
|
||||
if l.driver.GetDriverInfo().Capabilities[storageapi.CapSingleNodeVolume] {
|
||||
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageapi.CapSingleNodeVolume)
|
||||
}
|
||||
nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
|
||||
framework.ExpectNoError(err)
|
||||
@ -196,7 +197,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
|
||||
for i := 0; i < numVols; i++ {
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
resource := CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
resource := storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resources = append(l.resources, resource)
|
||||
pvcs = append(pvcs, resource.Pvc)
|
||||
}
|
||||
@ -218,7 +219,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
// Currently, multiple volumes are not generally available for pre-provisioned volumes,
|
||||
// because containerized storage servers, such as iSCSI and rbd, just return
|
||||
// a static volume inside the container instead of creating a new volume per request.
|
||||
if pattern.VolType == testpatterns.PreprovisionedPV {
|
||||
if pattern.VolType == storageapi.PreprovisionedPV {
|
||||
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
|
||||
}
|
||||
|
||||
@ -235,7 +236,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
curPattern.VolMode = v1.PersistentVolumeFilesystem
|
||||
}
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
|
||||
resource := storageapi.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
|
||||
l.resources = append(l.resources, resource)
|
||||
pvcs = append(pvcs, resource.Pvc)
|
||||
}
|
||||
@ -257,7 +258,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
// Currently, multiple volumes are not generally available for pre-provisoined volume,
|
||||
// because containerized storage servers, such as iSCSI and rbd, are just returning
|
||||
// a static volume inside container, not actually creating a new volume per request.
|
||||
if pattern.VolType == testpatterns.PreprovisionedPV {
|
||||
if pattern.VolType == storageapi.PreprovisionedPV {
|
||||
e2eskipper.Skipf("This test doesn't work with pre-provisioned volume -- skipping")
|
||||
}
|
||||
|
||||
@ -265,8 +266,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
defer cleanup()
|
||||
|
||||
// Check different-node test requirement
|
||||
if l.driver.GetDriverInfo().Capabilities[CapSingleNodeVolume] {
|
||||
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, CapSingleNodeVolume)
|
||||
if l.driver.GetDriverInfo().Capabilities[storageapi.CapSingleNodeVolume] {
|
||||
e2eskipper.Skipf("Driver %s only supports %v -- skipping", l.driver.GetDriverInfo().Name, storageapi.CapSingleNodeVolume)
|
||||
}
|
||||
nodes, err := e2enode.GetReadySchedulableNodes(l.cs)
|
||||
framework.ExpectNoError(err)
|
||||
@ -294,7 +295,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
curPattern.VolMode = v1.PersistentVolumeFilesystem
|
||||
}
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
resource := CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
|
||||
resource := storageapi.CreateVolumeResource(driver, l.config, curPattern, testVolumeSizeRange)
|
||||
l.resources = append(l.resources, resource)
|
||||
pvcs = append(pvcs, resource.Pvc)
|
||||
}
|
||||
@ -314,13 +315,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
|
||||
numPods := 2
|
||||
|
||||
if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
|
||||
if !l.driver.GetDriverInfo().Capabilities[storageapi.CapMultiPODs] {
|
||||
e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
|
||||
}
|
||||
|
||||
// Create volume
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
resource := storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resources = append(l.resources, resource)
|
||||
|
||||
// Test access to the volume from pods on different node
|
||||
@ -339,13 +340,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
|
||||
numPods := 2
|
||||
|
||||
if !l.driver.GetDriverInfo().Capabilities[CapMultiPODs] {
|
||||
if !l.driver.GetDriverInfo().Capabilities[storageapi.CapMultiPODs] {
|
||||
e2eskipper.Skipf("Driver %q does not support multiple concurrent pods - skipping", dInfo.Name)
|
||||
}
|
||||
|
||||
// Create volume
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
resource := storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resources = append(l.resources, resource)
|
||||
|
||||
// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
|
||||
@ -367,8 +368,8 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
|
||||
numPods := 2
|
||||
|
||||
if !l.driver.GetDriverInfo().Capabilities[CapRWX] {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, CapRWX)
|
||||
if !l.driver.GetDriverInfo().Capabilities[storageapi.CapRWX] {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", l.driver.GetDriverInfo().Name, storageapi.CapRWX)
|
||||
}
|
||||
|
||||
// Check different-node test requirement
|
||||
@ -390,7 +391,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
|
||||
|
||||
// Create volume
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
resource := CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
resource := storageapi.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resources = append(l.resources, resource)
|
||||
|
||||
// Test access to the volume from pods on different node
|
||||
@ -423,7 +424,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
|
||||
index := i + 1
|
||||
path := fmt.Sprintf("/mnt/volume%d", index)
|
||||
ginkgo.By(fmt.Sprintf("Checking if the volume%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
|
||||
utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
|
||||
e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
|
||||
|
||||
if readSeedBase > 0 {
|
||||
ginkgo.By(fmt.Sprintf("Checking if read from the volume%d works properly", index))
|
||||
@ -524,7 +525,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
|
||||
for i, pod := range pods {
|
||||
index := i + 1
|
||||
ginkgo.By(fmt.Sprintf("Checking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
|
||||
utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
|
||||
e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, path)
|
||||
|
||||
if readOnly {
|
||||
ginkgo.By("Skipping volume content checks, volume is read-only")
|
||||
@ -560,7 +561,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
|
||||
index := i + 1
|
||||
// index of pod and index of pvc match, because pods are created above way
|
||||
ginkgo.By(fmt.Sprintf("Rechecking if the volume in pod%d exists as expected volume mode (%s)", index, *pvc.Spec.VolumeMode))
|
||||
utils.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
|
||||
e2evolume.CheckVolumeModeOfPath(f, pod, *pvc.Spec.VolumeMode, "/mnt/volume1")
|
||||
|
||||
if readOnly {
|
||||
ginkgo.By("Skipping volume content checks, volume is read-only")
|
||||
|
@ -38,7 +38,8 @@ import (
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
// StorageClassTest represents parameters to be used by provisioning tests.
|
||||
@ -62,14 +63,14 @@ type StorageClassTest struct {
|
||||
}
|
||||
|
||||
type provisioningTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomProvisioningTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomProvisioningTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &provisioningTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "provisioning",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -81,33 +82,33 @@ func InitCustomProvisioningTestSuite(patterns []testpatterns.TestPattern) TestSu
|
||||
|
||||
// InitProvisioningTestSuite returns provisioningTestSuite that implements TestSuite interface
|
||||
// using test suite default patterns
|
||||
func InitProvisioningTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
testpatterns.NtfsDynamicPV,
|
||||
func InitProvisioningTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DefaultFsDynamicPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
storageapi.NtfsDynamicPV,
|
||||
}
|
||||
return InitCustomProvisioningTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (p *provisioningTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (p *provisioningTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return p.tsInfo
|
||||
}
|
||||
|
||||
func (p *provisioningTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (p *provisioningTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
// Check preconditions.
|
||||
if pattern.VolType != testpatterns.DynamicPV {
|
||||
if pattern.VolType != storageapi.DynamicPV {
|
||||
e2eskipper.Skipf("Suite %q does not support %v", p.tsInfo.Name, pattern.VolType)
|
||||
}
|
||||
dInfo := driver.GetDriverInfo()
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageapi.CapBlock] {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolMode)
|
||||
}
|
||||
}
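
The skip logic above is driven entirely by the driver's declared DriverInfo. A hedged sketch of the sort of declaration it inspects; the Capability map key type is assumed to have moved to the api package with the rest, and the concrete values are illustrative only:

exampleDriverInfo := &storageapi.DriverInfo{
	Name:             "example.csi.driver", // illustrative name, not a real driver
	InTreePluginName: "",                   // empty: no in-tree plugin migration to check
	Capabilities: map[storageapi.Capability]bool{
		storageapi.CapBlock:              true,  // raw block volume mode supported
		storageapi.CapSnapshotDataSource: false, // provisioning from snapshots not supported
	},
}
_ = exampleDriverInfo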
|
||||
|
||||
func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (p *provisioningTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
testCase *StorageClassTest
|
||||
@ -120,24 +121,24 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
}
|
||||
var (
|
||||
dInfo = driver.GetDriverInfo()
|
||||
dDriver DynamicPVTestDriver
|
||||
dDriver storageapi.DynamicPVTestDriver
|
||||
l local
|
||||
)
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
dDriver, _ = driver.(DynamicPVTestDriver)
|
||||
dDriver, _ = driver.(storageapi.DynamicPVTestDriver)
|
||||
// Now do the more expensive test initialization.
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
|
||||
l.cs = l.config.Framework.ClientSet
|
||||
testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange
|
||||
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
|
||||
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
|
||||
|
||||
l.sc = dDriver.GetDynamicProvisionStorageClass(l.config, pattern.FsType)
|
||||
@ -168,7 +169,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
err := tryFunc(l.driverCleanup)
|
||||
err := storageutils.TryFunc(l.driverCleanup)
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(err, "while cleaning up driver")
|
||||
|
||||
@ -194,14 +195,14 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
})
|
||||
|
||||
ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
|
||||
if !dInfo.Capabilities[CapSnapshotDataSource] {
|
||||
if !dInfo.Capabilities[storageapi.CapSnapshotDataSource] {
|
||||
e2eskipper.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
|
||||
}
|
||||
if !dInfo.SupportedFsType.Has(pattern.FsType) {
|
||||
e2eskipper.Skipf("Driver %q does not support %q fs type - skipping", dInfo.Name, pattern.FsType)
|
||||
}
|
||||
|
||||
sDriver, ok := driver.(SnapshottableTestDriver)
|
||||
sDriver, ok := driver.(storageapi.SnapshottableTestDriver)
|
||||
if !ok {
|
||||
framework.Failf("Driver %q has CapSnapshotDataSource but does not implement SnapshottableTestDriver", dInfo.Name)
|
||||
}
|
||||
@ -210,7 +211,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
defer cleanup()
|
||||
|
||||
dc := l.config.Framework.DynamicClient
|
||||
testConfig := convertTestConfig(l.config)
|
||||
testConfig := storageapi.ConvertTestConfig(l.config)
|
||||
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
|
||||
dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
|
||||
defer cleanupFunc()
|
||||
@ -220,7 +221,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
ginkgo.By("checking whether the created volume has the pre-populated data")
|
||||
tests := []e2evolume.Test{
|
||||
{
|
||||
Volume: *createVolumeSource(claim.Name, false /* readOnly */),
|
||||
Volume: *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
|
||||
Mode: pattern.VolMode,
|
||||
File: "index.html",
|
||||
ExpectedContent: expectedContent,
|
||||
@ -232,13 +233,13 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
})
|
||||
|
||||
ginkgo.It("should provision storage with pvc data source", func() {
|
||||
if !dInfo.Capabilities[CapPVCDataSource] {
|
||||
if !dInfo.Capabilities[storageapi.CapPVCDataSource] {
|
||||
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
|
||||
}
|
||||
init()
|
||||
defer cleanup()
|
||||
|
||||
testConfig := convertTestConfig(l.config)
|
||||
testConfig := storageapi.ConvertTestConfig(l.config)
|
||||
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
|
||||
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
|
||||
defer dataSourceCleanup()
|
||||
@ -248,7 +249,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
ginkgo.By("checking whether the created volume has the pre-populated data")
|
||||
tests := []e2evolume.Test{
|
||||
{
|
||||
Volume: *createVolumeSource(claim.Name, false /* readOnly */),
|
||||
Volume: *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
|
||||
Mode: pattern.VolMode,
|
||||
File: "index.html",
|
||||
ExpectedContent: expectedContent,
|
||||
@ -261,17 +262,17 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
|
||||
ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func() {
|
||||
// Test cloning a single volume multiple times.
|
||||
if !dInfo.Capabilities[CapPVCDataSource] {
|
||||
if !dInfo.Capabilities[storageapi.CapPVCDataSource] {
|
||||
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
|
||||
}
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[CapBlock] {
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !dInfo.Capabilities[storageapi.CapBlock] {
|
||||
e2eskipper.Skipf("Driver %q does not support block volumes - skipping", dInfo.Name)
|
||||
}
|
||||
|
||||
init()
|
||||
defer cleanup()
|
||||
|
||||
testConfig := convertTestConfig(l.config)
|
||||
testConfig := storageapi.ConvertTestConfig(l.config)
|
||||
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
|
||||
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
|
||||
defer dataSourceCleanup()
|
||||
@ -296,7 +297,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
ginkgo.By(fmt.Sprintf("checking whether the created volume %d has the pre-populated data", i))
|
||||
tests := []e2evolume.Test{
|
||||
{
|
||||
Volume: *createVolumeSource(claim.Name, false /* readOnly */),
|
||||
Volume: *storageutils.CreateVolumeSource(claim.Name, false /* readOnly */),
|
||||
Mode: pattern.VolMode,
|
||||
File: "index.html",
|
||||
ExpectedContent: expectedContent,
|
||||
@ -563,7 +564,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
|
||||
class, err := t.Client.StorageV1().StorageClasses().Create(context.TODO(), t.Class, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
defer func() {
|
||||
err = deleteStorageClass(t.Client, class.Name)
|
||||
err = storageutils.DeleteStorageClass(t.Client, class.Name)
|
||||
framework.ExpectNoError(err, "While deleting storage class")
|
||||
}()
|
||||
|
||||
@ -784,13 +785,13 @@ func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeCl
|
||||
func prepareSnapshotDataSourceForProvisioning(
|
||||
f *framework.Framework,
|
||||
config e2evolume.TestConfig,
|
||||
perTestConfig *PerTestConfig,
|
||||
pattern testpatterns.TestPattern,
|
||||
perTestConfig *storageapi.PerTestConfig,
|
||||
pattern storageapi.TestPattern,
|
||||
client clientset.Interface,
|
||||
dynamicClient dynamic.Interface,
|
||||
initClaim *v1.PersistentVolumeClaim,
|
||||
class *storagev1.StorageClass,
|
||||
sDriver SnapshottableTestDriver,
|
||||
sDriver storageapi.SnapshottableTestDriver,
|
||||
mode v1.PersistentVolumeMode,
|
||||
injectContent string,
|
||||
) (*v1.TypedLocalObjectReference, func()) {
|
||||
@ -808,7 +809,7 @@ func prepareSnapshotDataSourceForProvisioning(
|
||||
// write namespace to the /mnt/test (= the volume).
|
||||
tests := []e2evolume.Test{
|
||||
{
|
||||
Volume: *createVolumeSource(updatedClaim.Name, false /* readOnly */),
|
||||
Volume: *storageutils.CreateVolumeSource(updatedClaim.Name, false /* readOnly */),
|
||||
Mode: mode,
|
||||
File: "index.html",
|
||||
ExpectedContent: injectContent,
|
||||
@ -816,7 +817,7 @@ func prepareSnapshotDataSourceForProvisioning(
|
||||
}
|
||||
e2evolume.InjectContent(f, config, nil, "", tests)
|
||||
|
||||
snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts)
|
||||
snapshotResource := storageapi.CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts)
|
||||
|
||||
group := "snapshot.storage.k8s.io"
|
||||
dataSourceRef := &v1.TypedLocalObjectReference{
|
||||
@ -867,7 +868,7 @@ func preparePVCDataSourceForProvisioning(
|
||||
|
||||
tests := []e2evolume.Test{
|
||||
{
|
||||
Volume: *createVolumeSource(sourcePVC.Name, false /* readOnly */),
|
||||
Volume: *storageutils.CreateVolumeSource(sourcePVC.Name, false /* readOnly */),
|
||||
Mode: mode,
|
||||
File: "index.html",
|
||||
ExpectedContent: injectContent,
|
||||
|
@ -28,9 +28,6 @@ import (
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/client-go/dynamic"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
@ -38,42 +35,28 @@ import (
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
// snapshot CRD api group
const snapshotGroup = "snapshot.storage.k8s.io"

// snapshot CRD api version
const snapshotAPIVersion = "snapshot.storage.k8s.io/v1"

// data file name
const datapath = "/mnt/test/data"

var (
// SnapshotGVR is GroupVersionResource for volumesnapshots
SnapshotGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1", Resource: "volumesnapshots"}
// SnapshotClassGVR is GroupVersionResource for volumesnapshotclasses
SnapshotClassGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1", Resource: "volumesnapshotclasses"}
// SnapshotContentGVR is GroupVersionResource for volumesnapshotcontents
SnapshotContentGVR = schema.GroupVersionResource{Group: snapshotGroup, Version: "v1", Resource: "volumesnapshotcontents"}
)
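
These GroupVersionResources are how the suite drives the snapshot CRDs through the dynamic client. A minimal usage sketch, assuming dc is a dynamic.Interface and the snapshot CRDs are installed; the namespace and snapshot name are placeholders:

vs, err := dc.Resource(SnapshotGVR).Namespace("default").Get(context.TODO(), "my-snapshot", metav1.GetOptions{})
framework.ExpectNoError(err, "getting VolumeSnapshot")
// readyToUse is set by the snapshot controller once the backend snapshot exists.
if status, ok := vs.Object["status"].(map[string]interface{}); ok && status["readyToUse"] == true {
	framework.Logf("snapshot %s is ready to use", vs.GetName())
}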
|
||||
|
||||
type snapshottableTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
var (
|
||||
sDriver SnapshottableTestDriver
|
||||
dDriver DynamicPVTestDriver
|
||||
sDriver storageapi.SnapshottableTestDriver
|
||||
dDriver storageapi.DynamicPVTestDriver
|
||||
)
|
||||
|
||||
// InitCustomSnapshottableTestSuite returns snapshottableTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomSnapshottableTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomSnapshottableTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &snapshottableTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "snapshottable",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -86,35 +69,35 @@ func InitCustomSnapshottableTestSuite(patterns []testpatterns.TestPattern) TestS
|
||||
|
||||
// InitSnapshottableTestSuite returns snapshottableTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitSnapshottableTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DynamicSnapshotDelete,
|
||||
testpatterns.DynamicSnapshotRetain,
|
||||
testpatterns.PreprovisionedSnapshotDelete,
|
||||
testpatterns.PreprovisionedSnapshotRetain,
|
||||
func InitSnapshottableTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DynamicSnapshotDelete,
|
||||
storageapi.DynamicSnapshotRetain,
|
||||
storageapi.PreprovisionedSnapshotDelete,
|
||||
storageapi.PreprovisionedSnapshotRetain,
|
||||
}
|
||||
return InitCustomSnapshottableTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (s *snapshottableTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (s *snapshottableTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return s.tsInfo
|
||||
}
|
||||
|
||||
func (s *snapshottableTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (s *snapshottableTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
// Check preconditions.
|
||||
dInfo := driver.GetDriverInfo()
|
||||
ok := false
|
||||
_, ok = driver.(SnapshottableTestDriver)
|
||||
if !dInfo.Capabilities[CapSnapshotDataSource] || !ok {
|
||||
_, ok = driver.(storageapi.SnapshottableTestDriver)
|
||||
if !dInfo.Capabilities[storageapi.CapSnapshotDataSource] || !ok {
|
||||
e2eskipper.Skipf("Driver %q does not support snapshots - skipping", dInfo.Name)
|
||||
}
|
||||
_, ok = driver.(DynamicPVTestDriver)
|
||||
_, ok = driver.(storageapi.DynamicPVTestDriver)
|
||||
if !ok {
|
||||
e2eskipper.Skipf("Driver %q does not support dynamic provisioning - skipping", driver.GetDriverInfo().Name)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (s *snapshottableTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
@ -123,7 +106,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
ginkgo.Describe("volume snapshot controller", func() {
|
||||
var (
|
||||
err error
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
cleanupSteps []func()
|
||||
|
||||
@ -135,8 +118,8 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
originalMntTestData string
|
||||
)
|
||||
init := func() {
|
||||
sDriver, _ = driver.(SnapshottableTestDriver)
|
||||
dDriver, _ = driver.(DynamicPVTestDriver)
|
||||
sDriver, _ = driver.(storageapi.SnapshottableTestDriver)
|
||||
dDriver, _ = driver.(storageapi.DynamicPVTestDriver)
|
||||
cleanupSteps = make([]func(), 0)
|
||||
// init snap class, create a source PV, PVC, Pod
|
||||
cs = f.ClientSet
|
||||
@ -146,11 +129,11 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
config, driverCleanup = driver.PrepareTest(f)
|
||||
cleanupSteps = append(cleanupSteps, driverCleanup)
|
||||
|
||||
var volumeResource *VolumeResource
|
||||
var volumeResource *storageapi.VolumeResource
|
||||
cleanupSteps = append(cleanupSteps, func() {
|
||||
framework.ExpectNoError(volumeResource.CleanupResource())
|
||||
})
|
||||
volumeResource = CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
|
||||
volumeResource = storageapi.CreateVolumeResource(dDriver, config, pattern, s.GetTestSuiteInfo().SupportedSizeRange)
|
||||
|
||||
pvc = volumeResource.Pvc
|
||||
sc = volumeResource.Sc
|
||||
@ -185,7 +168,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
// Depending on how far the test executed, cleanup accordingly
|
||||
// Execute in reverse order, similar to defer stack
|
||||
for i := len(cleanupSteps) - 1; i >= 0; i-- {
|
||||
err := tryFunc(cleanupSteps[i])
|
||||
err := storageutils.TryFunc(cleanupSteps[i])
|
||||
framework.ExpectNoError(err, "while running cleanup steps")
|
||||
}
|
||||
|
||||
@ -205,11 +188,11 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
)
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
var sr *SnapshotResource
|
||||
var sr *storageapi.SnapshotResource
|
||||
cleanupSteps = append(cleanupSteps, func() {
|
||||
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
|
||||
})
|
||||
sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
|
||||
sr = storageapi.CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
|
||||
vs = sr.Vs
|
||||
vscontent = sr.Vscontent
|
||||
vsc = sr.Vsclass
|
||||
@ -217,13 +200,13 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion", func() {
|
||||
ginkgo.By("checking the snapshot")
|
||||
// Get new copy of the snapshot
|
||||
vs, err = dc.Resource(SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
|
||||
vs, err = dc.Resource(storageutils.SnapshotGVR).Namespace(vs.GetNamespace()).Get(context.TODO(), vs.GetName(), metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
// Get the bound snapshotContent
|
||||
snapshotStatus := vs.Object["status"].(map[string]interface{})
|
||||
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
|
||||
vscontent, err = dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
|
||||
vscontent, err = dc.Resource(storageutils.SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
snapshotContentSpec := vscontent.Object["spec"].(map[string]interface{})
|
||||
@ -232,7 +215,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
// Check SnapshotContent properties
|
||||
ginkgo.By("checking the SnapshotContent")
|
||||
// PreprovisionedCreatedSnapshot does not need to set the volume snapshot class name
|
||||
if pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot {
|
||||
if pattern.SnapshotType != storageapi.PreprovisionedCreatedSnapshot {
|
||||
framework.ExpectEqual(snapshotContentSpec["volumeSnapshotClassName"], vsc.GetName())
|
||||
}
|
||||
framework.ExpectEqual(volumeSnapshotRef["name"], vs.GetName())
|
||||
@ -285,316 +268,20 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
|
||||
err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
|
||||
err = storageapi.DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
switch pattern.SnapshotDeletionPolicy {
|
||||
case testpatterns.DeleteSnapshot:
|
||||
case storageapi.DeleteSnapshot:
|
||||
ginkgo.By("checking the SnapshotContent has been deleted")
|
||||
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
|
||||
err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
case testpatterns.RetainSnapshot:
|
||||
case storageapi.RetainSnapshot:
|
||||
ginkgo.By("checking the SnapshotContent has not been deleted")
|
||||
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
|
||||
err = utils.WaitForGVRDeletion(dc, storageutils.SnapshotContentGVR, vscontent.GetName(), 1*time.Second /* poll */, 30*time.Second /* timeout */)
|
||||
framework.ExpectError(err)
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
|
||||
func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
|
||||
framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)
|
||||
|
||||
if successful := utils.WaitUntil(poll, timeout, func() bool {
|
||||
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
|
||||
return false
|
||||
}
|
||||
|
||||
status := snapshot.Object["status"]
|
||||
if status == nil {
|
||||
framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
|
||||
return false
|
||||
}
|
||||
value := status.(map[string]interface{})
|
||||
if value["readyToUse"] == true {
|
||||
framework.Logf("VolumeSnapshot %s found and is ready", snapshotName)
|
||||
return true
|
||||
}
|
||||
|
||||
framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
|
||||
return false
|
||||
}); successful {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout)
|
||||
}
|
||||
|
||||
// DeleteAndWaitSnapshot deletes a VolumeSnapshot and waits for it to be deleted or until timeout occurs, whichever comes first
func DeleteAndWaitSnapshot(dc dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
var err error
ginkgo.By("deleting the snapshot")
err = dc.Resource(SnapshotGVR).Namespace(ns).Delete(context.TODO(), snapshotName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}

ginkgo.By("checking the Snapshot has been deleted")
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, ns, snapshotName, poll, timeout)

return err
}
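
A short usage sketch of the two helpers above, assuming dc is the test's dynamic client; the namespace, snapshot name, and the five-minute delete timeout are placeholder values:

err := WaitForSnapshotReady(dc, "default", "my-snapshot", framework.Poll, framework.SnapshotCreateTimeout)
framework.ExpectNoError(err, "snapshot never became ready")

err = DeleteAndWaitSnapshot(dc, "default", "my-snapshot", framework.Poll, 5*time.Minute)
framework.ExpectNoError(err, "snapshot was not deleted")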
|
||||
|
||||
// SnapshotResource represents a snapshot class, a snapshot and its bound snapshot contents for a specific test case
|
||||
type SnapshotResource struct {
|
||||
Config *PerTestConfig
|
||||
Pattern testpatterns.TestPattern
|
||||
|
||||
Vs *unstructured.Unstructured
|
||||
Vscontent *unstructured.Unstructured
|
||||
Vsclass *unstructured.Unstructured
|
||||
}
|
||||
|
||||
// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot
|
||||
// from the VolumeSnapshotClass using a dynamic client.
|
||||
// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects.
|
||||
func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) (*unstructured.Unstructured, *unstructured.Unstructured) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
var err error
|
||||
if pattern.SnapshotType != testpatterns.DynamicCreatedSnapshot && pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot {
|
||||
err = fmt.Errorf("SnapshotType must be set to either DynamicCreatedSnapshot or PreprovisionedCreatedSnapshot")
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
dc := config.Framework.DynamicClient
|
||||
|
||||
ginkgo.By("creating a SnapshotClass")
|
||||
sclass := sDriver.GetSnapshotClass(config)
|
||||
if sclass == nil {
|
||||
framework.Failf("Failed to get snapshot class based on test config")
|
||||
}
|
||||
sclass.Object["deletionPolicy"] = pattern.SnapshotDeletionPolicy.String()
|
||||
|
||||
sclass, err = dc.Resource(SnapshotClassGVR).Create(context.TODO(), sclass, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
sclass, err = dc.Resource(SnapshotClassGVR).Get(context.TODO(), sclass.GetName(), metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a dynamic VolumeSnapshot")
|
||||
// prepare a dynamically provisioned volume snapshot with certain data
|
||||
snapshot := getSnapshot(pvcName, pvcNamespace, sclass.GetName())
|
||||
|
||||
snapshot, err = dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Create(context.TODO(), snapshot, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
return sclass, snapshot
|
||||
}
|
||||
|
||||
// GetSnapshotContentFromSnapshot returns the VolumeSnapshotContent object bound to a
// given VolumeSnapshot
|
||||
func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured.Unstructured) *unstructured.Unstructured {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
err := WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
vs, err := dc.Resource(SnapshotGVR).Namespace(snapshot.GetNamespace()).Get(context.TODO(), snapshot.GetName(), metav1.GetOptions{})
|
||||
|
||||
snapshotStatus := vs.Object["status"].(map[string]interface{})
|
||||
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
|
||||
framework.Logf("received snapshotStatus %v", snapshotStatus)
|
||||
framework.Logf("snapshotContentName %s", snapshotContentName)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
vscontent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
return vscontent
|
||||
|
||||
}
|
||||
|
||||
// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
// the different snapshot provisioning and deletion policies defined by the test pattern.
|
||||
func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource {
|
||||
var err error
|
||||
r := SnapshotResource{
|
||||
Config: config,
|
||||
Pattern: pattern,
|
||||
}
|
||||
r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts)
|
||||
|
||||
dc := r.Config.Framework.DynamicClient
|
||||
|
||||
r.Vscontent = GetSnapshotContentFromSnapshot(dc, r.Vs)
|
||||
|
||||
if pattern.SnapshotType == testpatterns.PreprovisionedCreatedSnapshot {
|
||||
// prepare a pre-provisioned VolumeSnapshotContent with certain data
|
||||
// Because this could be run with an external CSI driver, we have no way
|
||||
// to pre-provision the snapshot as we normally would using their API.
|
||||
// We instead dynamically take a snapshot (above step), delete the old snapshot,
|
||||
// and create another snapshot using the first snapshot's snapshot handle.
|
||||
|
||||
ginkgo.By("updating the snapshot content deletion policy to retain")
|
||||
r.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Retain"
|
||||
|
||||
r.Vscontent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), r.Vscontent, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("recording the volume handle and snapshotHandle")
|
||||
snapshotHandle := r.Vscontent.Object["status"].(map[string]interface{})["snapshotHandle"].(string)
|
||||
framework.Logf("Recording snapshot handle: %s", snapshotHandle)
|
||||
csiDriverName := r.Vsclass.Object["driver"].(string)
|
||||
|
||||
// If the deletion policy on vscontent is Retain:
// - when vs is deleted, vscontent will not be deleted
// - when vscontent is manually deleted, the underlying snapshot resource will not be deleted
// We exploit this to create a snapshot resource from which we can create a pre-provisioned snapshot.
|
||||
ginkgo.By("deleting the snapshot and snapshot content")
|
||||
err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Delete(context.TODO(), r.Vs.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("checking the Snapshot has been deleted")
|
||||
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("checking the Snapshot content has been deleted")
|
||||
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a snapshot content with the snapshot handle")
|
||||
uuid := uuid.NewUUID()
|
||||
|
||||
snapName := getPreProvisionedSnapshotName(uuid)
|
||||
snapcontentName := getPreProvisionedSnapshotContentName(uuid)
|
||||
|
||||
r.Vscontent = getPreProvisionedSnapshotContent(snapcontentName, snapName, pvcNamespace, snapshotHandle, pattern.SnapshotDeletionPolicy.String(), csiDriverName)
|
||||
r.Vscontent, err = dc.Resource(SnapshotContentGVR).Create(context.TODO(), r.Vscontent, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("creating a snapshot with that snapshot content")
|
||||
r.Vs = getPreProvisionedSnapshot(snapName, pvcNamespace, snapcontentName)
|
||||
r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("getting the snapshot and snapshot content")
|
||||
r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Get(context.TODO(), r.Vs.GetName(), metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
r.Vscontent, err = dc.Resource(SnapshotContentGVR).Get(context.TODO(), r.Vscontent.GetName(), metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
return &r
|
||||
}
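
The pre-provisioned branch above hinges on flipping the deletion policy of the dynamically created content to Retain before deleting the API objects, so the backend snapshot identified by snapshotHandle survives and can be re-imported. A condensed sketch of just that step, reusing the names from the function; error handling is elided:

// Retain the backend snapshot while the VolumeSnapshot/VolumeSnapshotContent objects are recreated.
r.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Retain"
r.Vscontent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), r.Vscontent, metav1.UpdateOptions{})
framework.ExpectNoError(err)

// The handle is all that is needed to rebuild the pre-provisioned content object later.
snapshotHandle := r.Vscontent.Object["status"].(map[string]interface{})["snapshotHandle"].(string)
framework.Logf("Recording snapshot handle: %s", snapshotHandle)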
|
||||
|
||||
// CleanupResource cleans up the snapshot resource and ignores not found errors
|
||||
func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error {
|
||||
var err error
|
||||
var cleanupErrs []error
|
||||
|
||||
dc := sr.Config.Framework.DynamicClient
|
||||
|
||||
if sr.Vs != nil {
|
||||
framework.Logf("deleting snapshot %q/%q", sr.Vs.GetNamespace(), sr.Vs.GetName())
|
||||
|
||||
sr.Vs, err = dc.Resource(SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Get(context.TODO(), sr.Vs.GetName(), metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
snapshotStatus := sr.Vs.Object["status"].(map[string]interface{})
|
||||
snapshotContentName := snapshotStatus["boundVolumeSnapshotContentName"].(string)
|
||||
framework.Logf("received snapshotStatus %v", snapshotStatus)
|
||||
framework.Logf("snapshotContentName %s", snapshotContentName)
|
||||
|
||||
boundVsContent, err := dc.Resource(SnapshotContentGVR).Get(context.TODO(), snapshotContentName, metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
if boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" {
|
||||
// The purpose of this block is to prevent physical snapshotContent leaks.
|
||||
// We must update the SnapshotContent to have Delete Deletion policy,
|
||||
// or else the physical snapshot content will be leaked.
|
||||
boundVsContent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete"
|
||||
boundVsContent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), boundVsContent, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = dc.Resource(SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
case apierrors.IsNotFound(err):
|
||||
// the volume snapshot is not bound to snapshot content yet
|
||||
err = dc.Resource(SnapshotGVR).Namespace(sr.Vs.GetNamespace()).Delete(context.TODO(), sr.Vs.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
default:
|
||||
cleanupErrs = append(cleanupErrs, err)
|
||||
}
|
||||
case apierrors.IsNotFound(err):
|
||||
// Hope that the underlying snapshot content and resource are gone already
|
||||
default:
|
||||
cleanupErrs = append(cleanupErrs, err)
|
||||
}
|
||||
}
|
||||
if sr.Vscontent != nil {
|
||||
framework.Logf("deleting snapshot content %q", sr.Vscontent.GetName())
|
||||
|
||||
sr.Vscontent, err = dc.Resource(SnapshotContentGVR).Get(context.TODO(), sr.Vscontent.GetName(), metav1.GetOptions{})
|
||||
switch {
|
||||
case err == nil:
|
||||
if sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] != "Delete" {
|
||||
// The purpose of this block is to prevent physical snapshotContent leaks.
|
||||
// We must update the SnapshotContent to have Delete Deletion policy,
|
||||
// or else the physical snapshot content will be leaked.
|
||||
sr.Vscontent.Object["spec"].(map[string]interface{})["deletionPolicy"] = "Delete"
|
||||
sr.Vscontent, err = dc.Resource(SnapshotContentGVR).Update(context.TODO(), sr.Vscontent, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), sr.Vscontent.GetName(), metav1.DeleteOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
err = nil
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
case apierrors.IsNotFound(err):
|
||||
// Hope the underlying physical snapshot resource has been deleted already
|
||||
default:
|
||||
cleanupErrs = append(cleanupErrs, err)
|
||||
}
|
||||
}
|
||||
if sr.Vsclass != nil {
|
||||
framework.Logf("deleting snapshot class %q", sr.Vsclass.GetName())
|
||||
// typically this snapshot class has already been deleted
|
||||
err = dc.Resource(SnapshotClassGVR).Delete(context.TODO(), sr.Vsclass.GetName(), metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err)
|
||||
}
|
||||
err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
return utilerrors.NewAggregate(cleanupErrs)
|
||||
}
|
||||
|
@ -33,21 +33,22 @@ import (
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type snapshottableStressTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
type snapshottableStressTest struct {
|
||||
config *PerTestConfig
|
||||
testOptions VolumeSnapshotStressTestOptions
|
||||
config *storageapi.PerTestConfig
|
||||
testOptions storageapi.VolumeSnapshotStressTestOptions
|
||||
driverCleanup func()
|
||||
|
||||
pods []*v1.Pod
|
||||
volumes []*VolumeResource
|
||||
snapshots []*SnapshotResource
|
||||
volumes []*storageapi.VolumeResource
|
||||
snapshots []*storageapi.SnapshotResource
|
||||
// snapshotsMutex guards snapshots, because snapshot resources are appended from parallel goroutines.
snapshotsMutex sync.Mutex
|
||||
|
||||
@ -59,9 +60,9 @@ type snapshottableStressTest struct {
|
||||
|
||||
// InitCustomSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomSnapshottableStressTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomSnapshottableStressTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &snapshottableStressTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "snapshottable-stress",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -74,19 +75,19 @@ func InitCustomSnapshottableStressTestSuite(patterns []testpatterns.TestPattern)
|
||||
|
||||
// InitSnapshottableStressTestSuite returns snapshottableStressTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitSnapshottableStressTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DynamicSnapshotDelete,
|
||||
testpatterns.DynamicSnapshotRetain,
|
||||
func InitSnapshottableStressTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DynamicSnapshotDelete,
|
||||
storageapi.DynamicSnapshotRetain,
|
||||
}
|
||||
return InitCustomSnapshottableStressTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *snapshottableStressTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *snapshottableStressTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *snapshottableStressTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *snapshottableStressTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
driverInfo := driver.GetDriverInfo()
|
||||
var ok bool
|
||||
if driverInfo.VolumeSnapshotStressTestOptions == nil {
|
||||
@ -98,21 +99,21 @@ func (t *snapshottableStressTestSuite) SkipUnsupportedTests(driver TestDriver, p
|
||||
if driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots <= 0 {
|
||||
framework.Failf("NumSnapshots in snapshot stress test options must be a positive integer, received: %d", driverInfo.VolumeSnapshotStressTestOptions.NumSnapshots)
|
||||
}
|
||||
_, ok = driver.(SnapshottableTestDriver)
|
||||
if !driverInfo.Capabilities[CapSnapshotDataSource] || !ok {
|
||||
_, ok = driver.(storageapi.SnapshottableTestDriver)
|
||||
if !driverInfo.Capabilities[storageapi.CapSnapshotDataSource] || !ok {
|
||||
e2eskipper.Skipf("Driver %q doesn't implement SnapshottableTestDriver - skipping", driverInfo.Name)
|
||||
}
|
||||
|
||||
_, ok = driver.(DynamicPVTestDriver)
|
||||
_, ok = driver.(storageapi.DynamicPVTestDriver)
|
||||
if !ok {
|
||||
e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", driverInfo.Name)
|
||||
}
|
||||
}
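
The checks above only pass when a driver opts in via DriverInfo.VolumeSnapshotStressTestOptions. A hedged sketch of such a declaration; the numbers are illustrative assumptions, not values taken from this change:

opts := &storageapi.VolumeSnapshotStressTestOptions{
	NumPods:      10, // pods to create, each with its own volume and snapshotting goroutine
	NumSnapshots: 10, // snapshots taken per pod's volume
}
_ = opts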
|
||||
|
||||
func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *snapshottableStressTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
var (
|
||||
driverInfo *DriverInfo
|
||||
snapshottableDriver SnapshottableTestDriver
|
||||
driverInfo *storageapi.DriverInfo
|
||||
snapshottableDriver storageapi.SnapshottableTestDriver
|
||||
cs clientset.Interface
|
||||
stressTest *snapshottableStressTest
|
||||
)
|
||||
@ -123,7 +124,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
|
||||
init := func() {
|
||||
driverInfo = driver.GetDriverInfo()
|
||||
snapshottableDriver, _ = driver.(SnapshottableTestDriver)
|
||||
snapshottableDriver, _ = driver.(storageapi.SnapshottableTestDriver)
|
||||
cs = f.ClientSet
|
||||
config, driverCleanup := driver.PrepareTest(f)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
@ -131,8 +132,8 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
stressTest = &snapshottableStressTest{
|
||||
config: config,
|
||||
driverCleanup: driverCleanup,
|
||||
volumes: []*VolumeResource{},
|
||||
snapshots: []*SnapshotResource{},
|
||||
volumes: []*storageapi.VolumeResource{},
|
||||
snapshots: []*storageapi.SnapshotResource{},
|
||||
pods: []*v1.Pod{},
|
||||
testOptions: *driverInfo.VolumeSnapshotStressTestOptions,
|
||||
ctx: ctx,
|
||||
@ -144,7 +145,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
for i := 0; i < stressTest.testOptions.NumPods; i++ {
|
||||
framework.Logf("Creating resources for pod %d/%d", i, stressTest.testOptions.NumPods-1)
|
||||
|
||||
volume := CreateVolumeResource(driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
|
||||
volume := storageapi.CreateVolumeResource(driver, stressTest.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
|
||||
stressTest.volumes = append(stressTest.volumes, volume)
|
||||
|
||||
podConfig := e2epod.Config{
|
||||
@ -196,7 +197,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
for i, snapshot := range stressTest.snapshots {
|
||||
wg.Add(1)
|
||||
|
||||
go func(i int, snapshot *SnapshotResource) {
|
||||
go func(i int, snapshot *storageapi.SnapshotResource) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
defer wg.Done()
|
||||
|
||||
@ -228,7 +229,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
for i, volume := range stressTest.volumes {
|
||||
wg.Add(1)
|
||||
|
||||
go func(i int, volume *VolumeResource) {
|
||||
go func(i int, volume *storageapi.VolumeResource) {
|
||||
defer ginkgo.GinkgoRecover()
|
||||
defer wg.Done()
|
||||
|
||||
@ -241,7 +242,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
errs = append(errs, tryFunc(stressTest.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(stressTest.driverCleanup))
|
||||
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resources")
|
||||
}
|
||||
@ -274,7 +275,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
|
||||
return
|
||||
default:
|
||||
framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1)
|
||||
snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts)
|
||||
snapshot := storageapi.CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts)
|
||||
stressTest.snapshotsMutex.Lock()
|
||||
defer stressTest.snapshotsMutex.Unlock()
|
||||
stressTest.snapshots = append(stressTest.snapshots, snapshot)
|
||||
|
@ -39,8 +39,9 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -55,14 +56,14 @@ var (
|
||||
)
|
||||
|
||||
type subPathTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomSubPathTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomSubPathTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &subPathTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "subPath",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -74,33 +75,33 @@ func InitCustomSubPathTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
|
||||
// InitSubPathTestSuite returns subPathTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitSubPathTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsInlineVolume,
|
||||
testpatterns.DefaultFsPreprovisionedPV,
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
testpatterns.NtfsDynamicPV,
|
||||
func InitSubPathTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DefaultFsInlineVolume,
|
||||
storageapi.DefaultFsPreprovisionedPV,
|
||||
storageapi.DefaultFsDynamicPV,
|
||||
storageapi.NtfsDynamicPV,
|
||||
}
|
||||
return InitCustomSubPathTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (s *subPathTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (s *subPathTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return s.tsInfo
|
||||
}
|
||||
|
||||
func (s *subPathTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(
|
||||
testpatterns.PreprovisionedPV,
|
||||
testpatterns.InlineVolume))
|
||||
func (s *subPathTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(
|
||||
storageapi.PreprovisionedPV,
|
||||
storageapi.InlineVolume))
|
||||
}
|
||||
|
||||
func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (s *subPathTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
hostExec utils.HostExec
|
||||
resource *VolumeResource
|
||||
resource *storageapi.VolumeResource
|
||||
roVolSource *v1.VolumeSource
|
||||
pod *v1.Pod
|
||||
formatPod *v1.Pod
|
||||
@ -114,7 +115,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("provisioning", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -123,24 +124,24 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
|
||||
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.hostExec = utils.NewHostExec(f)
|
||||
|
||||
// Setup subPath test dependent resource
|
||||
volType := pattern.VolType
|
||||
switch volType {
|
||||
case testpatterns.InlineVolume:
|
||||
if iDriver, ok := driver.(InlineVolumeTestDriver); ok {
|
||||
case storageapi.InlineVolume:
|
||||
if iDriver, ok := driver.(storageapi.InlineVolumeTestDriver); ok {
|
||||
l.roVolSource = iDriver.GetVolumeSource(true, pattern.FsType, l.resource.Volume)
|
||||
}
|
||||
case testpatterns.PreprovisionedPV:
|
||||
case storageapi.PreprovisionedPV:
|
||||
l.roVolSource = &v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: l.resource.Pvc.Name,
|
||||
ReadOnly: true,
|
||||
},
|
||||
}
|
||||
case testpatterns.DynamicPV:
|
||||
case storageapi.DynamicPV:
|
||||
l.roVolSource = &v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: l.resource.Pvc.Name,
|
||||
@ -177,7 +178,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
l.resource = nil
|
||||
}
|
||||
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
|
||||
|
||||
|
@ -35,20 +35,21 @@ import (
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type topologyTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
type topologyTest struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
migrationCheck *migrationOpCheck
|
||||
|
||||
resource VolumeResource
|
||||
resource storageapi.VolumeResource
|
||||
pod *v1.Pod
|
||||
allTopologies []topology
|
||||
}
|
||||
@ -57,9 +58,9 @@ type topology map[string]string
|
||||
|
||||
// InitCustomTopologyTestSuite returns topologyTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomTopologyTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomTopologyTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &topologyTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "topology",
|
||||
TestPatterns: patterns,
|
||||
},
|
||||
@ -68,51 +69,51 @@ func InitCustomTopologyTestSuite(patterns []testpatterns.TestPattern) TestSuite
|
||||
|
||||
// InitTopologyTestSuite returns topologyTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitTopologyTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.TopologyImmediate,
|
||||
testpatterns.TopologyDelayed,
|
||||
func InitTopologyTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.TopologyImmediate,
|
||||
storageapi.TopologyDelayed,
|
||||
}
|
||||
return InitCustomTopologyTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *topologyTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *topologyTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *topologyTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *topologyTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
var ok bool
|
||||
_, ok = driver.(DynamicPVTestDriver)
|
||||
_, ok = driver.(storageapi.DynamicPVTestDriver)
|
||||
if !ok {
|
||||
e2eskipper.Skipf("Driver %s doesn't support %v -- skipping", dInfo.Name, pattern.VolType)
|
||||
}
|
||||
|
||||
if !dInfo.Capabilities[CapTopology] {
|
||||
if !dInfo.Capabilities[storageapi.CapTopology] {
|
||||
e2eskipper.Skipf("Driver %q does not support topology - skipping", dInfo.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *topologyTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
var (
|
||||
dInfo = driver.GetDriverInfo()
|
||||
dDriver DynamicPVTestDriver
|
||||
dDriver storageapi.DynamicPVTestDriver
|
||||
cs clientset.Interface
|
||||
err error
|
||||
)
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("topology", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("topology", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() topologyTest {
|
||||
dDriver, _ = driver.(DynamicPVTestDriver)
|
||||
dDriver, _ = driver.(storageapi.DynamicPVTestDriver)
|
||||
l := topologyTest{}
|
||||
|
||||
// Now do the more expensive test initialization.
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
|
||||
l.resource = VolumeResource{
|
||||
l.resource = storageapi.VolumeResource{
|
||||
Config: l.config,
|
||||
Pattern: pattern,
|
||||
}
|
||||
@ -141,7 +142,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
|
||||
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
|
||||
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
|
||||
l.resource.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
|
||||
ClaimSize: claimSize,
|
||||
@ -154,7 +155,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
|
||||
|
||||
cleanup := func(l topologyTest) {
|
||||
t.CleanupResources(cs, &l)
|
||||
err := tryFunc(l.driverCleanup)
|
||||
err := storageutils.TryFunc(l.driverCleanup)
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(err, "while cleaning up driver")
|
||||
|
||||
|
@ -34,7 +34,8 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -52,14 +53,14 @@ const (
|
||||
)
|
||||
|
||||
type volumeExpandTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomVolumeExpandTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomVolumeExpandTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &volumeExpandTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "volume-expand",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -71,39 +72,39 @@ func InitCustomVolumeExpandTestSuite(patterns []testpatterns.TestPattern) TestSu
|
||||
|
||||
// InitVolumeExpandTestSuite returns volumeExpandTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitVolumeExpandTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
testpatterns.DefaultFsDynamicPVAllowExpansion,
|
||||
testpatterns.BlockVolModeDynamicPVAllowExpansion,
|
||||
testpatterns.NtfsDynamicPV,
|
||||
testpatterns.NtfsDynamicPVAllowExpansion,
|
||||
func InitVolumeExpandTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DefaultFsDynamicPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
storageapi.DefaultFsDynamicPVAllowExpansion,
|
||||
storageapi.BlockVolModeDynamicPVAllowExpansion,
|
||||
storageapi.NtfsDynamicPV,
|
||||
storageapi.NtfsDynamicPVAllowExpansion,
|
||||
}
|
||||
return InitCustomVolumeExpandTestSuite(patterns)
|
||||
}
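A hedged sketch of how a driver-specific test file might consume the custom-pattern constructor after this refactor. The pattern names are the ones listed above; the testsuites import path and package name are assumptions, not shown in this diff.

// Hypothetical caller, not part of this commit.
package example

import (
	storageapi "k8s.io/kubernetes/test/e2e/storage/api"
	"k8s.io/kubernetes/test/e2e/storage/testsuites" // assumed import path
)

// Restrict volume expansion tests to default-fs dynamic PVs only.
func initCustomExpandSuite() storageapi.TestSuite {
	return testsuites.InitCustomVolumeExpandTestSuite([]storageapi.TestPattern{
		storageapi.DefaultFsDynamicPV,
		storageapi.DefaultFsDynamicPVAllowExpansion,
	})
}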
|
||||
|
||||
func (v *volumeExpandTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (v *volumeExpandTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return v.tsInfo
|
||||
}
|
||||
|
||||
func (v *volumeExpandTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (v *volumeExpandTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
// Check preconditions.
|
||||
if !driver.GetDriverInfo().Capabilities[CapControllerExpansion] {
|
||||
if !driver.GetDriverInfo().Capabilities[storageapi.CapControllerExpansion] {
|
||||
e2eskipper.Skipf("Driver %q does not support volume expansion - skipping", driver.GetDriverInfo().Name)
|
||||
}
|
||||
// Check preconditions.
|
||||
if !driver.GetDriverInfo().Capabilities[CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
|
||||
if !driver.GetDriverInfo().Capabilities[storageapi.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
|
||||
e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", driver.GetDriverInfo().Name)
|
||||
}
|
||||
}
|
||||
|
||||
func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (v *volumeExpandTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
resource *VolumeResource
|
||||
resource *storageapi.VolumeResource
|
||||
pod *v1.Pod
|
||||
pod2 *v1.Pod
|
||||
|
||||
@ -113,7 +114,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volume-expand", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volume-expand", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -122,7 +123,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
l.migrationCheck = newMigrationOpCheck(f.ClientSet, driver.GetDriverInfo().InTreePluginName)
|
||||
testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
}
|
||||
|
||||
cleanup := func() {
|
||||
@ -146,7 +147,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
l.resource = nil
|
||||
}
|
||||
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
|
||||
l.migrationCheck.validateMigrationVolumeOpCounts()
|
||||
|
@ -40,30 +40,30 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
// MD5 hashes of the test file corresponding to each file size.
// Test files are generated in testVolumeIO()
// If test file generation algorithm changes, these must be recomputed.
var md5hashes = map[int64]string{
testpatterns.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
testpatterns.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
testpatterns.FileSizeLarge: "8d763edc71bd16217664793b5a15e403",
storageapi.FileSizeSmall: "5c34c2813223a7ca05a3c2f38c0d1710",
storageapi.FileSizeMedium: "f2fa202b1ffeedda5f3a58bd1ae81104",
storageapi.FileSizeLarge: "8d763edc71bd16217664793b5a15e403",
}
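If the generation algorithm changes, the hashes can be recomputed offline. A minimal sketch, assuming the test file ends up being the 32-byte block repeated up to the target size (as the dd-based writer below suggests) and assuming FileSizeSmall/Medium/Large are the conventional 1 MiB, 100 MiB and 1 GiB values, which are not stated in this diff:

// Hypothetical recompute helper, not part of this commit.
package main

import (
	"crypto/md5"
	"fmt"
)

func main() {
	block := []byte("abcdefghijklmnopqrstuvwxyz123456") // the 32-byte block testVolumeIO repeats
	sizes := []int64{1 << 20, 100 << 20, 1 << 30}        // assumed FileSizeSmall/Medium/Large
	for _, size := range sizes {
		h := md5.New()
		for written := int64(0); written < size; written += int64(len(block)) {
			h.Write(block)
		}
		fmt.Printf("%d bytes: %x\n", size, h.Sum(nil))
	}
}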
|
||||
|
||||
const mountPath = "/opt"
|
||||
|
||||
type volumeIOTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
// InitCustomVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomVolumeIOTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomVolumeIOTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &volumeIOTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "volumeIO",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -75,31 +75,31 @@ func InitCustomVolumeIOTestSuite(patterns []testpatterns.TestPattern) TestSuite
|
||||
|
||||
// InitVolumeIOTestSuite returns volumeIOTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitVolumeIOTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsInlineVolume,
|
||||
testpatterns.DefaultFsPreprovisionedPV,
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
func InitVolumeIOTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DefaultFsInlineVolume,
|
||||
storageapi.DefaultFsPreprovisionedPV,
|
||||
storageapi.DefaultFsDynamicPV,
|
||||
}
|
||||
return InitCustomVolumeIOTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *volumeIOTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *volumeIOTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *volumeIOTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, testpatterns.NewVolTypeMap(
|
||||
testpatterns.PreprovisionedPV,
|
||||
testpatterns.InlineVolume))
|
||||
func (t *volumeIOTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
skipVolTypePatterns(pattern, driver, storageapi.NewVolTypeMap(
|
||||
storageapi.PreprovisionedPV,
|
||||
storageapi.InlineVolume))
|
||||
}
|
||||
|
||||
func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumeIOTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
resource *VolumeResource
|
||||
resource *storageapi.VolumeResource
|
||||
|
||||
migrationCheck *migrationOpCheck
|
||||
}
|
||||
@ -110,7 +110,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volumeio", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volumeio", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -120,7 +120,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
|
||||
l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
|
||||
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
if l.resource.VolSource == nil {
|
||||
e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
|
||||
}
|
||||
@ -135,7 +135,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
|
||||
}
|
||||
|
||||
if l.driverCleanup != nil {
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
}
|
||||
|
||||
@ -151,23 +151,23 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
|
||||
fileSizes := createFileSizes(dInfo.MaxFileSize)
|
||||
testFile := fmt.Sprintf("%s_io_test_%s", dInfo.Name, f.Namespace.Name)
|
||||
var fsGroup *int64
|
||||
if !framework.NodeOSDistroIs("windows") && dInfo.Capabilities[CapFsGroup] {
|
||||
if !framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageapi.CapFsGroup] {
|
||||
fsGroupVal := int64(1234)
|
||||
fsGroup = &fsGroupVal
|
||||
}
|
||||
podSec := v1.PodSecurityContext{
|
||||
FSGroup: fsGroup,
|
||||
}
|
||||
err := testVolumeIO(f, cs, convertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes)
|
||||
err := testVolumeIO(f, cs, storageapi.ConvertTestConfig(l.config), *l.resource.VolSource, &podSec, testFile, fileSizes)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
}
|
||||
|
||||
func createFileSizes(maxFileSize int64) []int64 {
|
||||
allFileSizes := []int64{
|
||||
testpatterns.FileSizeSmall,
|
||||
testpatterns.FileSizeMedium,
|
||||
testpatterns.FileSizeLarge,
|
||||
storageapi.FileSizeSmall,
|
||||
storageapi.FileSizeMedium,
|
||||
storageapi.FileSizeLarge,
|
||||
}
|
||||
fileSizes := []int64{}
|
||||
|
||||
@ -249,9 +249,9 @@ func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSo
|
||||
// Write `fsize` bytes to `fpath` in the pod, using dd and the `ddInput` file.
|
||||
func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsize int64) error {
|
||||
ginkgo.By(fmt.Sprintf("writing %d bytes to test file %s", fsize, fpath))
|
||||
loopCnt := fsize / testpatterns.MinFileSize
|
||||
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, testpatterns.MinFileSize, fpath)
|
||||
stdout, stderr, err := utils.PodExec(f, pod, writeCmd)
|
||||
loopCnt := fsize / storageapi.MinFileSize
|
||||
writeCmd := fmt.Sprintf("i=0; while [ $i -lt %d ]; do dd if=%s bs=%d >>%s 2>/dev/null; let i+=1; done", loopCnt, ddInput, storageapi.MinFileSize, fpath)
|
||||
stdout, stderr, err := e2evolume.PodExec(f, pod, writeCmd)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error writing to volume using %q: %s\nstdout: %s\nstderr: %s", writeCmd, err, stdout, stderr)
|
||||
}
|
||||
@ -261,7 +261,7 @@ func writeToFile(f *framework.Framework, pod *v1.Pod, fpath, ddInput string, fsi
|
||||
// Verify that the test file is the expected size and contains the expected content.
|
||||
func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize int64, ddInput string) error {
|
||||
ginkgo.By("verifying file size")
|
||||
rtnstr, stderr, err := utils.PodExec(f, pod, fmt.Sprintf("stat -c %%s %s", fpath))
|
||||
rtnstr, stderr, err := e2evolume.PodExec(f, pod, fmt.Sprintf("stat -c %%s %s", fpath))
|
||||
if err != nil || rtnstr == "" {
|
||||
return fmt.Errorf("unable to get file size via `stat %s`: %v\nstdout: %s\nstderr: %s", fpath, err, rtnstr, stderr)
|
||||
}
|
||||
@ -274,7 +274,7 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in
|
||||
}
|
||||
|
||||
ginkgo.By("verifying file hash")
|
||||
rtnstr, stderr, err = utils.PodExec(f, pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
|
||||
rtnstr, stderr, err = e2evolume.PodExec(f, pod, fmt.Sprintf("md5sum %s | cut -d' ' -f1", fpath))
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to test file hash via `md5sum %s`: %v\nstdout: %s\nstderr: %s", fpath, err, rtnstr, stderr)
|
||||
}
|
||||
@ -295,7 +295,7 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in
|
||||
// Delete `fpath` to save some disk space on host. Delete errors are logged but ignored.
|
||||
func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
|
||||
ginkgo.By(fmt.Sprintf("deleting test file %s...", fpath))
|
||||
stdout, stderr, err := utils.PodExec(f, pod, fmt.Sprintf("rm -f %s", fpath))
|
||||
stdout, stderr, err := e2evolume.PodExec(f, pod, fmt.Sprintf("rm -f %s", fpath))
|
||||
if err != nil {
|
||||
// keep going, the test dir will be deleted when the volume is unmounted
|
||||
framework.Logf("unable to delete test file %s: %v\nerror ignored, continuing test\nstdout: %s\nstderr: %s", fpath, err, stdout, stderr)
|
||||
@ -311,7 +311,7 @@ func deleteFile(f *framework.Framework, pod *v1.Pod, fpath string) {
|
||||
func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolume.TestConfig, volsrc v1.VolumeSource, podSecContext *v1.PodSecurityContext, file string, fsizes []int64) (err error) {
|
||||
ddInput := filepath.Join(mountPath, fmt.Sprintf("%s-%s-dd_if", config.Prefix, config.Namespace))
|
||||
writeBlk := strings.Repeat("abcdefghijklmnopqrstuvwxyz123456", 32) // 1KiB value
|
||||
loopCnt := testpatterns.MinFileSize / int64(len(writeBlk))
|
||||
loopCnt := storageapi.MinFileSize / int64(len(writeBlk))
|
||||
// initContainer cmd to create and fill dd's input file. The initContainer is used to create
|
||||
// the `dd` input file which is currently 1MiB. Rather than store a 1MiB go value, a loop is
|
||||
// used to create a 1MiB file in the target directory.
|
||||
@ -348,8 +348,8 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
|
||||
// create files of the passed-in file sizes and verify test file size and content
|
||||
for _, fsize := range fsizes {
|
||||
// file sizes must be a multiple of `MinFileSize`
|
||||
if math.Mod(float64(fsize), float64(testpatterns.MinFileSize)) != 0 {
|
||||
fsize = fsize/testpatterns.MinFileSize + testpatterns.MinFileSize
|
||||
if math.Mod(float64(fsize), float64(storageapi.MinFileSize)) != 0 {
|
||||
fsize = fsize/storageapi.MinFileSize + storageapi.MinFileSize
|
||||
}
|
||||
fpath := filepath.Join(mountPath, fmt.Sprintf("%s-%d", file, fsize))
|
||||
defer func() {
|
||||
|
@ -32,36 +32,37 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type volumeStressTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
type volumeStressTest struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
migrationCheck *migrationOpCheck
|
||||
|
||||
resources []*VolumeResource
|
||||
resources []*storageapi.VolumeResource
|
||||
pods []*v1.Pod
|
||||
// stop and wait for any async routines
|
||||
wg sync.WaitGroup
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
testOptions StressTestOptions
|
||||
testOptions storageapi.StressTestOptions
|
||||
}
|
||||
|
||||
var _ TestSuite = &volumeStressTestSuite{}
|
||||
var _ storageapi.TestSuite = &volumeStressTestSuite{}
|
||||
|
||||
// InitCustomVolumeStressTestSuite returns volumeStressTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomVolumeStressTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomVolumeStressTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &volumeStressTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "volume-stress",
|
||||
TestPatterns: patterns,
|
||||
},
|
||||
@ -70,19 +71,19 @@ func InitCustomVolumeStressTestSuite(patterns []testpatterns.TestPattern) TestSu
|
||||
|
||||
// InitVolumeStressTestSuite returns volumeStressTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitVolumeStressTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
func InitVolumeStressTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.DefaultFsDynamicPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
}
|
||||
return InitCustomVolumeStressTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *volumeStressTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *volumeStressTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *volumeStressTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumeStressTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
if dInfo.StressTestOptions == nil {
|
||||
e2eskipper.Skipf("Driver %s doesn't specify stress test options -- skipping", dInfo.Name)
|
||||
@ -94,15 +95,15 @@ func (t *volumeStressTestSuite) SkipUnsupportedTests(driver TestDriver, pattern
|
||||
framework.Failf("NumRestarts in stress test options must be a positive integer, received: %d", dInfo.StressTestOptions.NumRestarts)
|
||||
}
|
||||
|
||||
if _, ok := driver.(DynamicPVTestDriver); !ok {
|
||||
if _, ok := driver.(storageapi.DynamicPVTestDriver); !ok {
|
||||
e2eskipper.Skipf("Driver %s doesn't implement DynamicPVTestDriver -- skipping", dInfo.Name)
|
||||
}
|
||||
if !driver.GetDriverInfo().Capabilities[CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
|
||||
if !driver.GetDriverInfo().Capabilities[storageapi.CapBlock] && pattern.VolMode == v1.PersistentVolumeBlock {
|
||||
e2eskipper.Skipf("Driver %q does not support block volume mode - skipping", dInfo.Name)
|
||||
}
|
||||
}
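For reference, a minimal sketch of the driver-side declaration these checks read. The type and field names are inferred from the identifiers used in this suite (DriverInfo, Capabilities, StressTestOptions, NumPods, NumRestarts); the values are purely illustrative.

// Hypothetical DriverInfo fragment, not part of this commit.
func exampleStressDriverInfo() storageapi.DriverInfo {
	return storageapi.DriverInfo{
		Name: "example-csi",
		Capabilities: map[storageapi.Capability]bool{
			storageapi.CapBlock: true, // needed for the block-mode pattern
		},
		StressTestOptions: &storageapi.StressTestOptions{
			NumPods:     10, // pods created concurrently by the stress test
			NumRestarts: 10, // restarts per pod
		},
	}
}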
|
||||
|
||||
func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumeStressTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
var (
|
||||
dInfo = driver.GetDriverInfo()
|
||||
cs clientset.Interface
|
||||
@ -111,7 +112,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("stress", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("stress", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
cs = f.ClientSet
|
||||
@ -120,7 +121,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
// Now do the more expensive test initialization.
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
|
||||
l.resources = []*VolumeResource{}
|
||||
l.resources = []*storageapi.VolumeResource{}
|
||||
l.pods = []*v1.Pod{}
|
||||
l.testOptions = *dInfo.StressTestOptions
|
||||
l.ctx, l.cancel = context.WithCancel(context.Background())
|
||||
@ -129,7 +130,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
createPodsAndVolumes := func() {
|
||||
for i := 0; i < l.testOptions.NumPods; i++ {
|
||||
framework.Logf("Creating resources for pod %v/%v", i, l.testOptions.NumPods-1)
|
||||
r := CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
|
||||
r := storageapi.CreateVolumeResource(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange)
|
||||
l.resources = append(l.resources, r)
|
||||
podConfig := e2epod.Config{
|
||||
NS: f.Namespace.Name,
|
||||
@ -160,7 +161,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
errs = append(errs, resource.CleanupResource())
|
||||
}
|
||||
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
|
||||
l.migrationCheck.validateMigrationVolumeOpCounts()
|
||||
}
|
||||
|
@ -38,11 +38,12 @@ import (
|
||||
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
type volumeLimitsTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
const (
|
||||
@ -55,13 +56,13 @@ const (
|
||||
csiNodeInfoTimeout = 1 * time.Minute
|
||||
)
|
||||
|
||||
var _ TestSuite = &volumeLimitsTestSuite{}
|
||||
var _ storageapi.TestSuite = &volumeLimitsTestSuite{}
|
||||
|
||||
// InitCustomVolumeLimitsTestSuite returns volumeLimitsTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomVolumeLimitsTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomVolumeLimitsTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &volumeLimitsTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "volumeLimits",
|
||||
TestPatterns: patterns,
|
||||
},
|
||||
@ -70,29 +71,29 @@ func InitCustomVolumeLimitsTestSuite(patterns []testpatterns.TestPattern) TestSu
|
||||
|
||||
// InitVolumeLimitsTestSuite returns volumeLimitsTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitVolumeLimitsTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.FsVolModeDynamicPV,
|
||||
func InitVolumeLimitsTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.FsVolModeDynamicPV,
|
||||
}
|
||||
return InitCustomVolumeLimitsTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *volumeLimitsTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *volumeLimitsTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
}
|
||||
|
||||
func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumeLimitsTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
testCleanup func()
|
||||
|
||||
cs clientset.Interface
|
||||
ns *v1.Namespace
|
||||
// VolumeResource contains pv, pvc, sc, etc. of the first pod created
|
||||
resource *VolumeResource
|
||||
resource *storageapi.VolumeResource
|
||||
|
||||
// All created PVCs, incl. the one in resource
|
||||
pvcs []*v1.PersistentVolumeClaim
|
||||
@ -109,7 +110,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volumelimits", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volumelimits", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
// This checks that CSIMaxVolumeLimitChecker works as expected.
|
||||
// A randomly chosen node should be able to handle as many CSI volumes as
|
||||
@ -121,11 +122,11 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
// BEWARE: the test may create lot of volumes and it's really slow.
|
||||
ginkgo.It("should support volume limits [Serial]", func() {
|
||||
driverInfo := driver.GetDriverInfo()
|
||||
if !driverInfo.Capabilities[CapVolumeLimits] {
|
||||
if !driverInfo.Capabilities[storageapi.CapVolumeLimits] {
|
||||
ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
|
||||
}
|
||||
var dDriver DynamicPVTestDriver
|
||||
if dDriver = driver.(DynamicPVTestDriver); dDriver == nil {
|
||||
var dDriver storageapi.DynamicPVTestDriver
|
||||
if dDriver = driver.(storageapi.DynamicPVTestDriver); dDriver == nil {
|
||||
framework.Failf("Test driver does not provide dynamically created volumes")
|
||||
}
|
||||
|
||||
@ -154,10 +155,10 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
|
||||
// Create a storage class and generate a PVC. Do not instantiate the PVC yet, keep it for the last pod.
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
|
||||
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, dDriver)
|
||||
|
||||
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
defer func() {
|
||||
err := l.resource.CleanupResource()
|
||||
framework.ExpectNoError(err, "while cleaning up resource")
|
||||
@ -315,14 +316,14 @@ func waitForAllPVCsBound(cs clientset.Interface, timeout time.Duration, pvcs []*
|
||||
return pvNames, nil
|
||||
}
|
||||
|
||||
func getNodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
|
||||
func getNodeLimits(cs clientset.Interface, config *storageapi.PerTestConfig, nodeName string, driverInfo *storageapi.DriverInfo) (int, error) {
|
||||
if len(driverInfo.InTreePluginName) == 0 {
|
||||
return getCSINodeLimits(cs, config, nodeName, driverInfo)
|
||||
}
|
||||
return getInTreeNodeLimits(cs, nodeName, driverInfo)
|
||||
}
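A hedged sketch of where the two helpers below look for the limit: in-tree plugins publish it in node allocatable under a plugin-specific key (the AWS EBS key shown is only an example), while CSI drivers publish it per driver on the CSINode object. Field and key names here are assumptions, not part of this diff.

// Hypothetical, self-contained illustration; not part of this commit.
package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

func nodeVolumeLimitSketch(cs clientset.Interface, nodeName string) (int64, error) {
	// In-tree plugins: look at node allocatable, e.g. "attachable-volumes-aws-ebs".
	node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	if q, ok := node.Status.Allocatable[v1.ResourceName("attachable-volumes-aws-ebs")]; ok {
		return q.Value(), nil
	}
	// CSI drivers: look at the CSINode object's per-driver allocatable count.
	csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	for _, d := range csiNode.Spec.Drivers {
		if d.Allocatable != nil && d.Allocatable.Count != nil {
			return int64(*d.Allocatable.Count), nil
		}
	}
	return 0, fmt.Errorf("no volume limit published for node %s", nodeName)
}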
|
||||
|
||||
func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *DriverInfo) (int, error) {
|
||||
func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *storageapi.DriverInfo) (int, error) {
|
||||
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@ -349,7 +350,7 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *Dr
|
||||
return int(limit.Value()), nil
|
||||
}
|
||||
|
||||
func getCSINodeLimits(cs clientset.Interface, config *PerTestConfig, nodeName string, driverInfo *DriverInfo) (int, error) {
|
||||
func getCSINodeLimits(cs clientset.Interface, config *storageapi.PerTestConfig, nodeName string, driverInfo *storageapi.DriverInfo) (int, error) {
|
||||
// Retry with a timeout, the driver might just have been installed and kubelet takes a while to publish everything.
|
||||
var limit int
|
||||
err := wait.PollImmediate(2*time.Second, csiNodeInfoTimeout, func() (bool, error) {
|
||||
|
@ -39,8 +39,8 @@ import (
|
||||
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
"k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -49,16 +49,16 @@ const (
|
||||
)
|
||||
|
||||
type volumeModeTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
var _ TestSuite = &volumeModeTestSuite{}
|
||||
var _ storageapi.TestSuite = &volumeModeTestSuite{}
|
||||
|
||||
// InitCustomVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomVolumeModeTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomVolumeModeTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &volumeModeTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "volumeMode",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -70,32 +70,32 @@ func InitCustomVolumeModeTestSuite(patterns []testpatterns.TestPattern) TestSuit
|
||||
|
||||
// InitVolumeModeTestSuite returns volumeModeTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitVolumeModeTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
testpatterns.FsVolModePreprovisionedPV,
|
||||
testpatterns.FsVolModeDynamicPV,
|
||||
testpatterns.BlockVolModePreprovisionedPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
func InitVolumeModeTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
storageapi.FsVolModePreprovisionedPV,
|
||||
storageapi.FsVolModeDynamicPV,
|
||||
storageapi.BlockVolModePreprovisionedPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
}
|
||||
return InitCustomVolumeModeTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *volumeModeTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *volumeModeTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *volumeModeTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumeModeTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
}
|
||||
|
||||
func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumeModeTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
cs clientset.Interface
|
||||
ns *v1.Namespace
|
||||
// VolumeResource contains pv, pvc, sc, etc., owns cleaning that up
|
||||
VolumeResource
|
||||
storageapi.VolumeResource
|
||||
|
||||
migrationCheck *migrationOpCheck
|
||||
}
|
||||
@ -106,7 +106,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volumemode", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volumemode", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -131,22 +131,22 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
volumeNodeAffinity *v1.VolumeNodeAffinity
|
||||
)
|
||||
|
||||
l.VolumeResource = VolumeResource{
|
||||
l.VolumeResource = storageapi.VolumeResource{
|
||||
Config: l.config,
|
||||
Pattern: pattern,
|
||||
}
|
||||
|
||||
// Create volume for pre-provisioned volume tests
|
||||
l.Volume = CreateVolume(driver, l.config, pattern.VolType)
|
||||
l.Volume = storageapi.CreateVolume(driver, l.config, pattern.VolType)
|
||||
|
||||
switch pattern.VolType {
|
||||
case testpatterns.PreprovisionedPV:
|
||||
case storageapi.PreprovisionedPV:
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock {
|
||||
scName = fmt.Sprintf("%s-%s-sc-for-block", l.ns.Name, dInfo.Name)
|
||||
} else if pattern.VolMode == v1.PersistentVolumeFilesystem {
|
||||
scName = fmt.Sprintf("%s-%s-sc-for-file", l.ns.Name, dInfo.Name)
|
||||
}
|
||||
if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
|
||||
if pDriver, ok := driver.(storageapi.PreprovisionedPVTestDriver); ok {
|
||||
pvSource, volumeNodeAffinity = pDriver.GetPersistentVolumeSource(false, fsType, l.Volume)
|
||||
if pvSource == nil {
|
||||
e2eskipper.Skipf("Driver %q does not define PersistentVolumeSource - skipping", dInfo.Name)
|
||||
@ -157,8 +157,8 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
l.Pv = e2epv.MakePersistentVolume(pvConfig)
|
||||
l.Pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, l.ns.Name)
|
||||
}
|
||||
case testpatterns.DynamicPV:
|
||||
if dDriver, ok := driver.(DynamicPVTestDriver); ok {
|
||||
case storageapi.DynamicPV:
|
||||
if dDriver, ok := driver.(storageapi.DynamicPVTestDriver); ok {
|
||||
l.Sc = dDriver.GetDynamicProvisionStorageClass(l.config, fsType)
|
||||
if l.Sc == nil {
|
||||
e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name)
|
||||
@ -166,7 +166,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
l.Sc.VolumeBindingMode = &volBindMode
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
driverVolumeSizeRange := dInfo.SupportedSizeRange
|
||||
claimSize, err := getSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
|
||||
framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
|
||||
|
||||
l.Pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
|
||||
@ -183,16 +183,16 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
cleanup := func() {
|
||||
var errs []error
|
||||
errs = append(errs, l.CleanupResource())
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
|
||||
l.migrationCheck.validateMigrationVolumeOpCounts()
|
||||
}
|
||||
|
||||
// We register different tests depending on the driver
|
||||
isBlockSupported := dInfo.Capabilities[CapBlock]
|
||||
isBlockSupported := dInfo.Capabilities[storageapi.CapBlock]
|
||||
switch pattern.VolType {
|
||||
case testpatterns.PreprovisionedPV:
|
||||
case storageapi.PreprovisionedPV:
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
|
||||
ginkgo.It("should fail to create pod by failing to mount volume [Slow]", func() {
|
||||
manualInit()
|
||||
@ -253,7 +253,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
})
|
||||
}
|
||||
|
||||
case testpatterns.DynamicPV:
|
||||
case storageapi.DynamicPV:
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock && !isBlockSupported {
|
||||
ginkgo.It("should fail in binding dynamic provisioned PV to PVC [Slow][LinuxOnly]", func() {
|
||||
manualInit()
|
||||
@ -297,7 +297,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
skipTestIfBlockNotSupported(driver)
|
||||
init()
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.VolumeResource = *storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
defer cleanup()
|
||||
|
||||
ginkgo.By("Creating pod")
|
||||
@ -354,7 +354,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
}
|
||||
init()
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.VolumeResource = *CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.VolumeResource = *storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
defer cleanup()
|
||||
|
||||
ginkgo.By("Creating pod")
|
||||
@ -391,7 +391,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("Listing mounted volumes in the pod")
|
||||
hostExec := utils.NewHostExec(f)
|
||||
hostExec := storageutils.NewHostExec(f)
|
||||
defer hostExec.Cleanup()
|
||||
volumePaths, devicePaths, err := listPodVolumePluginDirectory(hostExec, pod, node)
|
||||
framework.ExpectNoError(err)
|
||||
@ -473,7 +473,7 @@ func swapVolumeMode(podTemplate *v1.Pod) *v1.Pod {
|
||||
// Sample output:
|
||||
// /var/lib/kubelet/pods/a4717a30-000a-4081-a7a8-f51adf280036/volumes/kubernetes.io~secret/default-token-rphdt
|
||||
// /var/lib/kubelet/pods/4475b7a3-4a55-4716-9119-fd0053d9d4a6/volumeDevices/kubernetes.io~aws-ebs/pvc-5f9f80f5-c90b-4586-9966-83f91711e1c0
|
||||
func listPodVolumePluginDirectory(h utils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) {
|
||||
func listPodVolumePluginDirectory(h storageutils.HostExec, pod *v1.Pod, node *v1.Node) (mounts []string, devices []string, err error) {
|
||||
mountPath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumes")
|
||||
devicePath := filepath.Join("/var/lib/kubelet/pods/", string(pod.UID), "volumeDevices")
|
||||
|
||||
@ -488,7 +488,7 @@ func listPodVolumePluginDirectory(h utils.HostExec, pod *v1.Pod, node *v1.Node)
|
||||
return mounts, devices, nil
|
||||
}
|
||||
|
||||
func listPodDirectory(h utils.HostExec, path string, node *v1.Node) ([]string, error) {
|
||||
func listPodDirectory(h storageutils.HostExec, path string, node *v1.Node) ([]string, error) {
|
||||
// Return no error if the directory does not exist (e.g. there are no block volumes used)
|
||||
_, err := h.IssueCommandWithResult("test ! -d "+path, node)
|
||||
if err == nil {
|
||||
|
@ -34,21 +34,22 @@ import (
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
|
||||
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
|
||||
storageapi "k8s.io/kubernetes/test/e2e/storage/api"
|
||||
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
type volumesTestSuite struct {
|
||||
tsInfo TestSuiteInfo
|
||||
tsInfo storageapi.TestSuiteInfo
|
||||
}
|
||||
|
||||
var _ TestSuite = &volumesTestSuite{}
|
||||
var _ storageapi.TestSuite = &volumesTestSuite{}
|
||||
|
||||
// InitCustomVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
|
||||
// using custom test patterns
|
||||
func InitCustomVolumesTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
func InitCustomVolumesTestSuite(patterns []storageapi.TestPattern) storageapi.TestSuite {
|
||||
return &volumesTestSuite{
|
||||
tsInfo: TestSuiteInfo{
|
||||
tsInfo: storageapi.TestSuiteInfo{
|
||||
Name: "volumes",
|
||||
TestPatterns: patterns,
|
||||
SupportedSizeRange: e2evolume.SizeRange{
|
||||
@ -60,65 +61,65 @@ func InitCustomVolumesTestSuite(patterns []testpatterns.TestPattern) TestSuite {
|
||||
|
||||
// InitVolumesTestSuite returns volumesTestSuite that implements TestSuite interface
|
||||
// using testsuite default patterns
|
||||
func InitVolumesTestSuite() TestSuite {
|
||||
patterns := []testpatterns.TestPattern{
|
||||
func InitVolumesTestSuite() storageapi.TestSuite {
|
||||
patterns := []storageapi.TestPattern{
|
||||
// Default fsType
|
||||
testpatterns.DefaultFsInlineVolume,
|
||||
testpatterns.DefaultFsPreprovisionedPV,
|
||||
testpatterns.DefaultFsDynamicPV,
|
||||
storageapi.DefaultFsInlineVolume,
|
||||
storageapi.DefaultFsPreprovisionedPV,
|
||||
storageapi.DefaultFsDynamicPV,
|
||||
// ext3
|
||||
testpatterns.Ext3InlineVolume,
|
||||
testpatterns.Ext3PreprovisionedPV,
|
||||
testpatterns.Ext3DynamicPV,
|
||||
storageapi.Ext3InlineVolume,
|
||||
storageapi.Ext3PreprovisionedPV,
|
||||
storageapi.Ext3DynamicPV,
|
||||
// ext4
|
||||
testpatterns.Ext4InlineVolume,
|
||||
testpatterns.Ext4PreprovisionedPV,
|
||||
testpatterns.Ext4DynamicPV,
|
||||
storageapi.Ext4InlineVolume,
|
||||
storageapi.Ext4PreprovisionedPV,
|
||||
storageapi.Ext4DynamicPV,
|
||||
// xfs
|
||||
testpatterns.XfsInlineVolume,
|
||||
testpatterns.XfsPreprovisionedPV,
|
||||
testpatterns.XfsDynamicPV,
|
||||
storageapi.XfsInlineVolume,
|
||||
storageapi.XfsPreprovisionedPV,
|
||||
storageapi.XfsDynamicPV,
|
||||
// ntfs
|
||||
testpatterns.NtfsInlineVolume,
|
||||
testpatterns.NtfsPreprovisionedPV,
|
||||
testpatterns.NtfsDynamicPV,
|
||||
storageapi.NtfsInlineVolume,
|
||||
storageapi.NtfsPreprovisionedPV,
|
||||
storageapi.NtfsDynamicPV,
|
||||
// block volumes
|
||||
testpatterns.BlockVolModePreprovisionedPV,
|
||||
testpatterns.BlockVolModeDynamicPV,
|
||||
storageapi.BlockVolModePreprovisionedPV,
|
||||
storageapi.BlockVolModeDynamicPV,
|
||||
}
|
||||
return InitCustomVolumesTestSuite(patterns)
|
||||
}
|
||||
|
||||
func (t *volumesTestSuite) GetTestSuiteInfo() TestSuiteInfo {
|
||||
func (t *volumesTestSuite) GetTestSuiteInfo() storageapi.TestSuiteInfo {
|
||||
return t.tsInfo
|
||||
}
|
||||
|
||||
func (t *volumesTestSuite) SkipUnsupportedTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumesTestSuite) SkipUnsupportedTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
if pattern.VolMode == v1.PersistentVolumeBlock {
|
||||
skipTestIfBlockNotSupported(driver)
|
||||
}
|
||||
}
|
||||
|
||||
func skipExecTest(driver TestDriver) {
|
||||
func skipExecTest(driver storageapi.TestDriver) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
if !dInfo.Capabilities[CapExec] {
|
||||
if !dInfo.Capabilities[storageapi.CapExec] {
|
||||
e2eskipper.Skipf("Driver %q does not support exec - skipping", dInfo.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func skipTestIfBlockNotSupported(driver TestDriver) {
|
||||
func skipTestIfBlockNotSupported(driver storageapi.TestDriver) {
|
||||
dInfo := driver.GetDriverInfo()
|
||||
if !dInfo.Capabilities[CapBlock] {
|
||||
if !dInfo.Capabilities[storageapi.CapBlock] {
|
||||
e2eskipper.Skipf("Driver %q does not provide raw block - skipping", dInfo.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.TestPattern) {
|
||||
func (t *volumesTestSuite) DefineTests(driver storageapi.TestDriver, pattern storageapi.TestPattern) {
|
||||
type local struct {
|
||||
config *PerTestConfig
|
||||
config *storageapi.PerTestConfig
|
||||
driverCleanup func()
|
||||
|
||||
resource *VolumeResource
|
||||
resource *storageapi.VolumeResource
|
||||
|
||||
migrationCheck *migrationOpCheck
|
||||
}
|
||||
@ -127,7 +128,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
|
||||
// Beware that it also registers an AfterEach which renders f unusable. Any code using
|
||||
// f must run inside an It or Context callback.
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volume", getDriverTimeouts(driver))
|
||||
f := framework.NewFrameworkWithCustomTimeouts("volume", storageapi.GetDriverTimeouts(driver))
|
||||
|
||||
init := func() {
|
||||
l = local{}
|
||||
@ -136,7 +137,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
l.config, l.driverCleanup = driver.PrepareTest(f)
|
||||
l.migrationCheck = newMigrationOpCheck(f.ClientSet, dInfo.InTreePluginName)
|
||||
testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange
|
||||
l.resource = CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
l.resource = storageapi.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
|
||||
if l.resource.VolSource == nil {
|
||||
e2eskipper.Skipf("Driver %q does not define volumeSource - skipping", dInfo.Name)
|
||||
}
|
||||
@ -149,7 +150,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
l.resource = nil
|
||||
}
|
||||
|
||||
errs = append(errs, tryFunc(l.driverCleanup))
|
||||
errs = append(errs, storageutils.TryFunc(l.driverCleanup))
|
||||
l.driverCleanup = nil
|
||||
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
|
||||
l.migrationCheck.validateMigrationVolumeOpCounts()
|
||||
@ -158,7 +159,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
ginkgo.It("should store data", func() {
|
||||
init()
|
||||
defer func() {
|
||||
e2evolume.TestServerCleanup(f, convertTestConfig(l.config))
|
||||
e2evolume.TestServerCleanup(f, storageapi.ConvertTestConfig(l.config))
|
||||
cleanup()
|
||||
}()
|
||||
|
||||
@ -172,9 +173,9 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
dInfo.Name, f.Namespace.Name),
|
||||
},
|
||||
}
|
||||
config := convertTestConfig(l.config)
|
||||
config := storageapi.ConvertTestConfig(l.config)
|
||||
var fsGroup *int64
|
||||
if framework.NodeOSDistroIs("windows") && dInfo.Capabilities[CapFsGroup] {
|
||||
if framework.NodeOSDistroIs("windows") && dInfo.Capabilities[storageapi.CapFsGroup] {
|
||||
fsGroupVal := int64(1234)
|
||||
fsGroup = &fsGroupVal
|
||||
}
|
||||
@ -183,7 +184,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
|
||||
// and we don't have reliable way to detect volumes are unmounted or
|
||||
// not before starting the second pod.
|
||||
e2evolume.InjectContent(f, config, fsGroup, pattern.FsType, tests)
|
||||
if driver.GetDriverInfo().Capabilities[CapPersistence] {
|
||||
if driver.GetDriverInfo().Capabilities[storageapi.CapPersistence] {
|
||||
e2evolume.TestVolumeClient(f, config, fsGroup, pattern.FsType, tests)
|
||||
} else {
|
||||
ginkgo.By("Skipping persistence check for non-persistent volume")
|
||||
@ -206,7 +207,7 @@ func testScriptInPod(
|
||||
f *framework.Framework,
|
||||
volumeType string,
|
||||
source *v1.VolumeSource,
|
||||
config *PerTestConfig) {
|
||||
config *storageapi.PerTestConfig) {
|
||||
|
||||
const (
|
||||
volPath = "/vol1"
|
||||
|
@ -1,6 +1,6 @@
package(default_visibility = ["//visibility:public"])

load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
name = "go_default_library",
@ -11,6 +11,8 @@ go_library(
"framework.go",
"host_exec.go",
"local.go",
"pod.go",
"snapshot.go",
"utils.go",
],
importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
@ -20,6 +22,7 @@ go_library(
"//staging/src/k8s.io/api/rbac/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -37,6 +40,8 @@ go_library(
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/podlogs:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/aws:go_default_library",
"//vendor/github.com/aws/aws-sdk-go/service/ec2:go_default_library",
@ -44,7 +49,6 @@ go_library(
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/github.com/pkg/errors:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)

@ -60,3 +64,10 @@ filegroup(
srcs = [":package-srcs"],
tags = ["automanaged"],
)

go_test(
name = "go_default_test",
srcs = ["utils_test.go"],
embed = [":go_default_library"],
deps = ["//test/e2e/framework/volume:go_default_library"],
)
182 test/e2e/storage/utils/pod.go Normal file
@ -0,0 +1,182 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
"context"
"fmt"
"regexp"
"strings"
"time"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/storage/podlogs"
)

// StartPodLogs begins capturing log output and events from current
// and future pods running in the namespace of the framework. That
// ends when the returned cleanup function is called.
//
// The output goes to log files (when using --report-dir, as in the
// CI) or the output stream (otherwise).
func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
ctx, cancel := context.WithCancel(context.Background())
cs := f.ClientSet

ns := driverNamespace.Name

to := podlogs.LogOutput{
StatusWriter: ginkgo.GinkgoWriter,
}
if framework.TestContext.ReportDir == "" {
to.LogWriter = ginkgo.GinkgoWriter
} else {
test := ginkgo.CurrentGinkgoTestDescription()
// Clean up each individual component text such that
// it contains only characters that are valid as file
// name.
reg := regexp.MustCompile("[^a-zA-Z0-9_-]+")
var components []string
for _, component := range test.ComponentTexts {
components = append(components, reg.ReplaceAllString(component, "_"))
}
// We end the prefix with a slash to ensure that all logs
// end up in a directory named after the current test.
//
// Each component name maps to a directory. This
// avoids cluttering the root artifact directory and
// keeps each directory name smaller (the full test
// name at one point exceeded 256 characters, which was
// too much for some filesystems).
to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
strings.Join(components, "/") + "/"
}
podlogs.CopyAllLogs(ctx, cs, ns, to)

// pod events are something that the framework already collects itself
// after a failed test. Logging them live is only useful for interactive
// debugging, not when we collect reports.
if framework.TestContext.ReportDir == "" {
podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
}

return cancel
}
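A brief usage sketch (hypothetical caller, not part of this file): the returned function is intended to be deferred so capture stops when the test ends.

// Hypothetical helper showing the intended call pattern.
func withPodLogs(f *framework.Framework, driverNamespace *v1.Namespace, body func()) {
	cleanup := StartPodLogs(f, driverNamespace)
	defer cleanup()
	body()
}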
// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""

nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"

framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}

sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}

if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}

framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)

if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for a minute to check if the kubelet PID is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, wait until the node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}
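A usage sketch (hypothetical caller, not part of this file). KubeletCommand itself blocks until the node reaches the expected Ready or NotReady state, so a disruptive test can simply chain the operations; KStop and KStart are the constants named in the comment above.

// Hypothetical helper: bounce the kubelet hosting a pod during a disruptive test.
func restartKubeletForPod(c clientset.Interface, pod *v1.Pod) {
	KubeletCommand(KStop, c, pod)  // waits for the node to go NotReady
	KubeletCommand(KStart, c, pod) // waits for the node to return to Ready
}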
// getHostAddress gets the node for a pod and returns the first
|
||||
// address. Returns an error if the node the pod is on doesn't have an
|
||||
// address.
|
||||
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
|
||||
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
// Try externalAddress first
|
||||
for _, address := range node.Status.Addresses {
|
||||
if address.Type == v1.NodeExternalIP {
|
||||
if address.Address != "" {
|
||||
return address.Address, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// If no externalAddress found, try internalAddress
|
||||
for _, address := range node.Status.Addresses {
|
||||
if address.Type == v1.NodeInternalIP {
|
||||
if address.Address != "" {
|
||||
return address.Address, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If not found, return error
|
||||
return "", fmt.Errorf("No address for pod %v on node %v",
|
||||
p.Name, p.Spec.NodeName)
|
||||
}
75
test/e2e/storage/utils/snapshot.go
Normal file
@ -0,0 +1,75 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
"context"
"fmt"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
)

const (
// SnapshotGroup is the snapshot CRD api group
SnapshotGroup = "snapshot.storage.k8s.io"
// SnapshotAPIVersion is the snapshot CRD api version
SnapshotAPIVersion = "snapshot.storage.k8s.io/v1"
)

var (
// SnapshotGVR is GroupVersionResource for volumesnapshots
SnapshotGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshots"}
// SnapshotClassGVR is GroupVersionResource for volumesnapshotclasses
SnapshotClassGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotclasses"}
// SnapshotContentGVR is GroupVersionResource for volumesnapshotcontents
SnapshotContentGVR = schema.GroupVersionResource{Group: SnapshotGroup, Version: "v1", Resource: "volumesnapshotcontents"}
)

// WaitForSnapshotReady waits for a VolumeSnapshot to be ready to use or until timeout occurs, whichever comes first.
func WaitForSnapshotReady(c dynamic.Interface, ns string, snapshotName string, poll, timeout time.Duration) error {
framework.Logf("Waiting up to %v for VolumeSnapshot %s to become ready", timeout, snapshotName)

if successful := WaitUntil(poll, timeout, func() bool {
snapshot, err := c.Resource(SnapshotGVR).Namespace(ns).Get(context.TODO(), snapshotName, metav1.GetOptions{})
if err != nil {
framework.Logf("Failed to get snapshot %q, retrying in %v. Error: %v", snapshotName, poll, err)
return false
}

status := snapshot.Object["status"]
if status == nil {
framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
return false
}
value := status.(map[string]interface{})
if value["readyToUse"] == true {
framework.Logf("VolumeSnapshot %s found and is ready", snapshotName)
return true
}

framework.Logf("VolumeSnapshot %s found but is not ready.", snapshotName)
return false
}); successful {
return nil
}

return fmt.Errorf("VolumeSnapshot %s is not ready within %v", snapshotName, timeout)
}
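
A brief usage sketch (illustrative, not part of the diff), assuming the caller already holds a dynamic client for the cluster and has created a VolumeSnapshot named `name` in namespace `ns`:

// waitForTestSnapshot is a hypothetical helper; the poll interval and timeout are example values.
func waitForTestSnapshot(dc dynamic.Interface, ns, name string) error {
	return WaitForSnapshotReady(dc, ns, name, 2*time.Second, 5*time.Minute)
}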

@ -21,6 +21,7 @@ import (
"crypto/sha256"
"encoding/base64"
"fmt"
"math"
"math/rand"
"path/filepath"
"strings"
@ -32,6 +33,7 @@ import (
v1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -39,13 +41,11 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
imageutils "k8s.io/kubernetes/test/utils/image"
uexec "k8s.io/utils/exec"
)

// KubeletOpt type definition
@ -59,7 +59,9 @@ const (
// KStop defines stop value
KStop KubeletOpt = "stop"
// KRestart defines restart value
KRestart KubeletOpt = "restart"
KRestart KubeletOpt = "restart"
minValidSize string = "1Ki"
maxValidSize string = "10Ei"
)

const (
@ -67,37 +69,10 @@ const (
podSecurityPolicyPrivilegedClusterRoleName = "e2e-test-privileged-psp"
)

// PodExec runs f.ExecCommandInContainerWithFullOutput to execute a shell cmd in target pod
func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string, error) {
if framework.NodeOSDistroIs("windows") {
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
}
return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)

}

// VerifyExecInPodSucceed verifies that a shell cmd in the target pod succeeds
func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {

if exiterr, ok := err.(uexec.CodeExitError); ok {
exitCode := exiterr.ExitStatus()
framework.ExpectNoError(err,
"%q should succeed, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should succeed, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, err, stdout, stderr)
}
}
}

// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
fsGroupResult := strings.Fields(stdout)[3]
@ -105,131 +80,6 @@ func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string
"Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
}

// VerifyExecInPodFail verifies that a shell cmd in the target pod fails with a certain exit code
func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
stdout, stderr, err := PodExec(f, pod, shExec)
if err != nil {
if exiterr, ok := err.(clientexec.ExitError); ok {
actualExitCode := exiterr.ExitStatus()
framework.ExpectEqual(actualExitCode, exitCode,
"%q should fail with exit code %d, but failed with exit code %d and error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, actualExitCode, exiterr, stdout, stderr)
} else {
framework.ExpectNoError(err,
"%q should fail with exit code %d, but failed with error message %q\nstdout: %s\nstderr: %s",
shExec, exitCode, err, stdout, stderr)
}
}
framework.ExpectError(err, "%q should fail with exit code %d, but exit without error", shExec, exitCode)
}

func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
}
return false
}

// getHostAddress gets the node for a pod and returns the first
// address. Returns an error if the node the pod is on doesn't have an
// address.
func getHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
node, err := client.CoreV1().Nodes().Get(context.TODO(), p.Spec.NodeName, metav1.GetOptions{})
if err != nil {
return "", err
}
// Try externalAddress first
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}
// If no externalAddress found, try internalAddress
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
if address.Address != "" {
return address.Address, nil
}
}
}

// If not found, return error
return "", fmt.Errorf("No address for pod %v on node %v",
p.Name, p.Spec.NodeName)
}

// KubeletCommand performs `start`, `restart`, or `stop` on the kubelet running on the node of the target pod and waits
// for the desired status.
// - First issues the command via `systemctl`
// - If `systemctl` returns stderr "command not found", issues the command via `service`
// - If `service` also returns stderr "command not found", the test is aborted.
// Allowed kubeletOps are `KStart`, `KStop`, and `KRestart`
func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) {
command := ""
systemctlPresent := false
kubeletPid := ""

nodeIP, err := getHostAddress(c, pod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"

framework.Logf("Checking if systemctl command is present")
sshResult, err := e2essh.SSH("systemctl --version", nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
if !strings.Contains(sshResult.Stderr, "command not found") {
command = fmt.Sprintf("systemctl %s kubelet", string(kOp))
systemctlPresent = true
} else {
command = fmt.Sprintf("service kubelet %s", string(kOp))
}

sudoPresent := isSudoPresent(nodeIP, framework.TestContext.Provider)
if sudoPresent {
command = fmt.Sprintf("sudo %s", command)
}

if kOp == KRestart {
kubeletPid = getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
}

framework.Logf("Attempting `%s`", command)
sshResult, err = e2essh.SSH(command, nodeIP, framework.TestContext.Provider)
framework.ExpectNoError(err, fmt.Sprintf("SSH to Node %q errored.", pod.Spec.NodeName))
e2essh.LogResult(sshResult)
gomega.Expect(sshResult.Code).To(gomega.BeZero(), "Failed to [%s] kubelet:\n%#v", string(kOp), sshResult)

if kOp == KStop {
if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName)
}
}
if kOp == KRestart {
// Wait for a minute to check if kubelet Pid is getting changed
isPidChanged := false
for start := time.Now(); time.Since(start) < 1*time.Minute; time.Sleep(2 * time.Second) {
kubeletPidAfterRestart := getKubeletMainPid(nodeIP, sudoPresent, systemctlPresent)
if kubeletPid != kubeletPidAfterRestart {
isPidChanged = true
break
}
}
framework.ExpectEqual(isPidChanged, true, "Kubelet PID remained unchanged after restarting Kubelet")
framework.Logf("Noticed that kubelet PID is changed. Waiting for 30 Seconds for Kubelet to come back")
time.Sleep(30 * time.Second)
}
if kOp == KStart || kOp == KRestart {
// For kubelet start and restart operations, wait until Node becomes Ready
if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok {
framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName)
}
}
}

// getKubeletMainPid returns the main PID of the kubelet process
func getKubeletMainPid(nodeIP string, sudoPresent bool, systemctlPresent bool) string {
command := ""
@ -614,46 +464,39 @@ func PrivilegedTestPSPClusterRoleBinding(client clientset.Interface,
}
}

// CheckVolumeModeOfPath checks the mode of the volume
func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// Check if block exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -b %s", path))

// Double check that it's not directory
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -d %s", path), 1)
} else {
// Check if directory exists
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("test -d %s", path))

// Double check that it's not block
VerifyExecInPodFail(f, pod, fmt.Sprintf("test -b %s", path), 1)
func isSudoPresent(nodeIP string, provider string) bool {
framework.Logf("Checking if sudo command is present")
sshResult, err := e2essh.SSH("sudo --version", nodeIP, provider)
framework.ExpectNoError(err, "SSH to %q errored.", nodeIP)
if !strings.Contains(sshResult.Stderr, "command not found") {
return true
}
return false
}

// CheckReadWriteToPath checks that the path can be read and written
func CheckReadWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
if volMode == v1.PersistentVolumeBlock {
// random -> file1
VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
e2evolume.VerifyExecInPodSucceed(f, pod, "dd if=/dev/urandom of=/tmp/file1 bs=64 count=1")
// file1 -> dev (write to dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=/tmp/file1 of=%s bs=64 count=1", path))
// dev -> file2 (read from dev)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s of=/tmp/file2 bs=64 count=1", path))
// file1 == file2 (check contents)
VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
e2evolume.VerifyExecInPodSucceed(f, pod, "diff /tmp/file1 /tmp/file2")
// Clean up temp files
VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")
e2evolume.VerifyExecInPodSucceed(f, pod, "rm -f /tmp/file1 /tmp/file2")

// Check that writing file to block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path), 1)
} else {
// text -> file1 (write to file)
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo 'Hello world.' > %s/file1.txt", path))
// grep file1 (read from file and check contents)
VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path))
e2evolume.VerifyExecInPodSucceed(f, pod, readFile("Hello word.", path))
// Check that writing to directory as block volume fails
VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
e2evolume.VerifyExecInPodFail(f, pod, fmt.Sprintf("dd if=/dev/urandom of=%s bs=64 count=1", path), 1)
}
}
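
As a rough usage sketch (illustrative; assumes a framework.Framework `f`, a running `pod`, and a raw block device path mapped into it), the two helpers are typically paired: first confirm the path really is a block device, then exercise it with dd-based reads and writes.

// verifyBlockDevice is a hypothetical helper combining the checks above.
func verifyBlockDevice(f *framework.Framework, pod *v1.Pod, devicePath string) {
	// Fails the test if the path is a directory rather than a block device.
	e2evolume.CheckVolumeModeOfPath(f, pod, v1.PersistentVolumeBlock, devicePath)
	// Writes random data through the device and reads it back for comparison.
	CheckReadWriteToPath(f, pod, v1.PersistentVolumeBlock, devicePath)
}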

@ -699,8 +542,8 @@ func CheckReadFromPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persisten

sum := sha256.Sum256(genBinDataFromSeed(len, seed))

VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum", pathForVolMode, iflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("dd if=%s %s bs=%d count=1 | sha256sum | grep -Fq %x", pathForVolMode, iflag, len, sum))
}

// CheckWriteToPath checks that a file can be properly written.
@ -724,8 +567,8 @@ func CheckWriteToPath(f *framework.Framework, pod *v1.Pod, volMode v1.Persistent

encoded := base64.StdEncoding.EncodeToString(genBinDataFromSeed(len, seed))

VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | sha256sum", encoded))
e2evolume.VerifyExecInPodSucceed(f, pod, fmt.Sprintf("echo %s | base64 -d | dd of=%s %s bs=%d count=1", encoded, pathForVolMode, oflag, len))
}
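
Both helpers above verify data by regenerating the same byte stream from a seed and comparing sha256 checksums, so nothing has to be copied out of the pod. A standalone sketch of that idea (genBinDataFromSeed itself is outside this hunk; this is an assumed, simplified equivalent):

// binDataFromSeed deterministically derives n bytes from a seed.
func binDataFromSeed(n int, seed int64) []byte {
	r := rand.New(rand.NewSource(seed))
	data := make([]byte, n)
	r.Read(data)
	return data
}

// checksumHex returns the hex-encoded sha256 sum, matching the `sha256sum | grep -Fq %x` check.
func checksumHex(data []byte) string {
	return fmt.Sprintf("%x", sha256.Sum256(data))
}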

// findMountPoints returns all mount points on given node under specified directory.
@ -866,7 +709,7 @@ func WaitForGVRFinalizer(ctx context.Context, c dynamic.Interface, gvr schema.Gr
// VerifyFilePathGidInPod verifies the expected GID of the target filepath
func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("ls -l %s", filePath)
stdout, stderr, err := PodExec(f, pod, cmd)
stdout, stderr, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
ll := strings.Fields(stdout)
@ -877,7 +720,90 @@ func VerifyFilePathGidInPod(f *framework.Framework, filePath, expectedGid string
// ChangeFilePathGidInPod changes the GID of the target filepath.
func ChangeFilePathGidInPod(f *framework.Framework, filePath, targetGid string, pod *v1.Pod) {
cmd := fmt.Sprintf("chgrp %s %s", targetGid, filePath)
_, _, err := PodExec(f, pod, cmd)
_, _, err := e2evolume.PodExec(f, pod, cmd)
framework.ExpectNoError(err)
VerifyFilePathGidInPod(f, filePath, targetGid, pod)
}

// DeleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func DeleteStorageClass(cs clientset.Interface, className string) error {
err := cs.StorageV1().StorageClasses().Delete(context.TODO(), className, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return err
}
return nil
}

// CreateVolumeSource creates a volume source object
func CreateVolumeSource(pvcName string, readOnly bool) *v1.VolumeSource {
return &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
ReadOnly: readOnly,
},
}
}
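
A short usage sketch (illustrative; the claim name and the `pod` variable are assumptions):

// Attach the PVC-backed source to a pod spec under a named volume.
vol := v1.Volume{
	Name:         "test-volume",
	VolumeSource: *CreateVolumeSource("test-pvc", false),
}
pod.Spec.Volumes = append(pod.Spec.Volumes, vol)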

// TryFunc tries to execute the function and returns the error if there is any
func TryFunc(f func()) error {
var err error
if f == nil {
return nil
}
defer func() {
if recoverError := recover(); recoverError != nil {
err = fmt.Errorf("%v", recoverError)
}
}()
f()
return err
}
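
TryFunc is useful where a callback may panic (for example, an assertion helper failing inside cleanup) but the caller wants an ordinary error instead of an aborted goroutine. A usage sketch (cleanupResource is a hypothetical function):

if err := TryFunc(func() {
	// Any panic raised in here is recovered and returned as an error.
	cleanupResource()
}); err != nil {
	framework.Logf("cleanup failed: %v", err)
}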

// GetSizeRangesIntersection takes two instances of storage size ranges and determines the
// intersection of the intervals (if it exists) and returns the minimum of the intersection
// to be used as the claim size for the test.
// If a value is not set, there is no minimum or maximum size limitation and a default size is set for it.
func GetSizeRangesIntersection(first e2evolume.SizeRange, second e2evolume.SizeRange) (string, error) {
var firstMin, firstMax, secondMin, secondMax resource.Quantity
var err error

// if SizeRange is not set, assign a minimum or maximum size
if len(first.Min) == 0 {
first.Min = minValidSize
}
if len(first.Max) == 0 {
first.Max = maxValidSize
}
if len(second.Min) == 0 {
second.Min = minValidSize
}
if len(second.Max) == 0 {
second.Max = maxValidSize
}

if firstMin, err = resource.ParseQuantity(first.Min); err != nil {
return "", err
}
if firstMax, err = resource.ParseQuantity(first.Max); err != nil {
return "", err
}
if secondMin, err = resource.ParseQuantity(second.Min); err != nil {
return "", err
}
if secondMax, err = resource.ParseQuantity(second.Max); err != nil {
return "", err
}

interSectionStart := math.Max(float64(firstMin.Value()), float64(secondMin.Value()))
intersectionEnd := math.Min(float64(firstMax.Value()), float64(secondMax.Value()))

// the minimum of the intersection shall be returned as the claim size
var intersectionMin resource.Quantity

if intersectionEnd-interSectionStart >= 0 { // have intersection
intersectionMin = *resource.NewQuantity(int64(interSectionStart), "BinarySI") // convert value to BinarySI format. E.g. 5Gi
// return the minimum of the intersection as the claim size
return intersectionMin.String(), nil
}
return "", fmt.Errorf("intersection of size ranges %+v, %+v is null", first, second)
}
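
A worked example of the intersection logic (illustrative call; the expected result follows from the code above): a driver that supports [1Gi, 10Gi] combined with a test pattern that only requires at least 5Gi should yield a 5Gi claim.

size, err := GetSizeRangesIntersection(
	e2evolume.SizeRange{Min: "1Gi", Max: "10Gi"},
	e2evolume.SizeRange{Min: "5Gi"}, // Max is empty, so maxValidSize ("10Ei") is assumed
)
// The intersection is [5Gi, 10Gi]; its minimum, "5Gi", is returned with a nil error.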

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites
package utils

import (
"testing"
@ -41,7 +41,7 @@ import (
// -----------------------------------------------------------------
// |min=?,max=?| #13 | #14 | #15 | #16 |
// |---------------------------------------------------------------|
func Test_getSizeRangesIntersection(t *testing.T) {
func Test_GetSizeRangesIntersection(t *testing.T) {
type args struct {
first e2evolume.SizeRange
second e2evolume.SizeRange
@ -463,13 +463,13 @@ func Test_getSizeRangesIntersection(t *testing.T) {
},
}
for _, tt := range tests {
got, err := getSizeRangesIntersection(tt.args.first, tt.args.second)
got, err := GetSizeRangesIntersection(tt.args.first, tt.args.second)
if (err != nil) != tt.wantErr {
t.Errorf("%q. getSizeRangesIntersection() error = %v, wantErr %v", tt.name, err, tt.wantErr)
t.Errorf("%q. GetSizeRangesIntersection() error = %v, wantErr %v", tt.name, err, tt.wantErr)
continue
}
if got != tt.want {
t.Errorf("%q. getSizeRangesIntersection() = %v, want %v", tt.name, got, tt.want)
t.Errorf("%q. GetSizeRangesIntersection() = %v, want %v", tt.name, got, tt.want)
}
}
}

@ -21,6 +21,7 @@ go_library(
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/e2e/storage/utils:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",

@ -27,7 +27,8 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
"k8s.io/kubernetes/test/e2e/storage/utils"
e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
"k8s.io/kubernetes/test/e2e/upgrades"

"github.com/onsi/ginkgo"
@ -104,10 +105,10 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
framework.ExpectNoError(err)

ginkgo.By("Checking if PV exists as expected volume mode")
utils.CheckVolumeModeOfPath(f, t.pod, block, devicePath)
e2evolume.CheckVolumeModeOfPath(f, t.pod, block, devicePath)

ginkgo.By("Checking if read/write to PV works properly")
utils.CheckReadWriteToPath(f, t.pod, block, devicePath)
storageutils.CheckReadWriteToPath(f, t.pod, block, devicePath)
}

// Test waits for the downgrade to complete, and then verifies that a pod can no
@ -117,7 +118,7 @@ func (t *VolumeModeDowngradeTest) Test(f *framework.Framework, done <-chan struc
<-done

ginkgo.By("Verifying that nothing exists at the device path in the pod")
utils.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
e2evolume.VerifyExecInPodFail(f, t.pod, fmt.Sprintf("test -e %s", devicePath), 1)
}

// Teardown cleans up any remaining resources.