clean up duplication
This commit is contained in: parent 2f52e91d56, commit abf8df7543
test/e2e/storage/csi_mock_volume_test.go (new file, 280 lines)
@@ -0,0 +1,280 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"crypto/sha256"
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	storage "k8s.io/api/storage/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/drivers"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
	"k8s.io/kubernetes/test/e2e/storage/utils"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

type cleanupFuncs func()

var _ = utils.SIGDescribe("CSI Mock volumes", func() {
	type mockDriverSetup struct {
		cs           clientset.Interface
		config       *testsuites.PerTestConfig
		testCleanups []cleanupFuncs
		pods         []*v1.Pod
		pvcs         []*v1.PersistentVolumeClaim
		sc           map[string]*storage.StorageClass
		driver       testsuites.TestDriver
		provisioner  string
	}
	var m mockDriverSetup
	var attachable bool
	var deployCRD bool
	var podInfoVersion *string
	var scName string
	f := framework.NewDefaultFramework("csi-mock-volumes")

	init := func() {
		m = mockDriverSetup{
			cs: f.ClientSet,
			sc: make(map[string]*storage.StorageClass),
		}
		csics := f.CSIClientSet
		var err error

		m.driver = drivers.InitMockCSIDriver(deployCRD, attachable, podInfoVersion)
		config, testCleanup := m.driver.PrepareTest(f)
		m.testCleanups = append(m.testCleanups, testCleanup)
		m.config = config

		if deployCRD {
			err = waitForCSIDriver(csics, m.config.GetUniqueDriverName())
framework.ExpectNoError(err, "Failed to get CSIDriver : %v", err)
|
||||
			m.testCleanups = append(m.testCleanups, func() {
				destroyCSIDriver(csics, m.config.GetUniqueDriverName())
			})
		}
	}

	createPod := func() (*storage.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
		By("Creating pod")
		var sc *storagev1.StorageClass
		if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
			sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
		}
		m.provisioner = sc.Provisioner
		nodeName := m.config.ClientNodeName
		scTest := testsuites.StorageClassTest{
			Name:         m.driver.GetDriverInfo().Name,
			Provisioner:  sc.Provisioner,
			Parameters:   sc.Parameters,
			ClaimSize:    "1Gi",
			ExpectedSize: "1Gi",
		}
		if scName != "" {
			scTest.StorageClassName = scName
		}
		nodeSelection := testsuites.NodeSelection{
			// The mock driver only works when everything runs on a single node.
			Name: nodeName,
		}
		class, claim, pod := startPausePod(f.ClientSet, scTest, nodeSelection, f.Namespace.Name)
		if class != nil {
			m.sc[class.Name] = class
		}
		if claim != nil {
			m.pvcs = append(m.pvcs, claim)
		}
		if pod != nil {
			m.pods = append(m.pods, pod)
		}
		return class, claim, pod
	}

	resetSharedVariables := func() {
		attachable = false
		deployCRD = false
		scName = ""
		podInfoVersion = nil
	}

	cleanup := func() {
		cs := f.ClientSet
		var errs []error
		By("Deleting pod")
		for _, pod := range m.pods {
			errs = append(errs, framework.DeletePodWithWait(f, cs, pod))
		}

		By("Deleting claim")
		for _, claim := range m.pvcs {
			claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
			if err == nil {
				cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
				framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute)
			}

		}

		By("Deleting storageclass")
		for _, sc := range m.sc {
			cs.StorageV1().StorageClasses().Delete(sc.Name, nil)
		}

		By("Cleaning up resources")
		for _, cleanupFunc := range m.testCleanups {
			cleanupFunc()
		}

		// reset the shared variables so the next test starts from the defaults
		resetSharedVariables()
		err := utilerrors.NewAggregate(errs)
		Expect(err).NotTo(HaveOccurred(), "while cleaning up after test")
	}

	// The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
	Context("CSI attach test using mock driver [Feature:CSIDriverRegistry]", func() {
		tests := []struct {
			name             string
			driverAttachable bool
			deployDriverCRD  bool
		}{
			{
				name:             "should not require VolumeAttach for drivers without attachment",
				driverAttachable: false,
				deployDriverCRD:  true,
			},
			{
				name:             "should require VolumeAttach for drivers with attachment",
				driverAttachable: true,
				deployDriverCRD:  true,
			},
			{
				name:             "should preserve attachment policy when no CSIDriver present",
				driverAttachable: true,
				deployDriverCRD:  false,
			},
		}
		for _, t := range tests {
			It(t.name, func() {
				deployCRD = t.deployDriverCRD
				attachable = t.driverAttachable
				var err error
				init()
				defer cleanup()

				_, claim, pod := createPod()
				if pod == nil {
					return
				}
				err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "Failed to start pod: %v", err)

				By("Checking if VolumeAttachment was created for the pod")
				handle := getVolumeHandle(m.cs, claim)
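				// Recompute the expected VolumeAttachment object name,
				// "csi-" + hex(sha256(volumeHandle + provisioner + nodeName)),
				// which is assumed to match the deterministic name Kubernetes
				// gives the attachment for this volume, then look it up directly.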
				attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, m.provisioner, m.config.ClientNodeName)))
				attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
				_, err = m.cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
				if err != nil {
					if errors.IsNotFound(err) {
						if t.driverAttachable {
							framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
						}
					} else {
						framework.ExpectNoError(err, "Failed to find VolumeAttachment")
					}
				}
				if !t.driverAttachable {
					Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found")
				}
			})

		}
	})

	Context("CSI workload information using mock driver [Feature:CSIDriverRegistry]", func() {
		var (
			err            error
			podInfoV1      = "v1"
			podInfoUnknown = "unknown"
			podInfoEmpty   = ""
		)
		tests := []struct {
			name                  string
			podInfoOnMountVersion *string
			deployDriverCRD       bool
			expectPodInfo         bool
		}{
			{
				name:                  "should not be passed when podInfoOnMountVersion=nil",
				podInfoOnMountVersion: nil,
				deployDriverCRD:       true,
				expectPodInfo:         false,
			},
			{
				name:                  "should be passed when podInfoOnMountVersion=v1",
				podInfoOnMountVersion: &podInfoV1,
				deployDriverCRD:       true,
				expectPodInfo:         true,
			},
			{
				name:                  "should not be passed when podInfoOnMountVersion=<empty string>",
				podInfoOnMountVersion: &podInfoEmpty,
				deployDriverCRD:       true,
				expectPodInfo:         false,
			},
			{
				name:                  "should not be passed when podInfoOnMountVersion=<unknown string>",
				podInfoOnMountVersion: &podInfoUnknown,
				deployDriverCRD:       true,
				expectPodInfo:         false,
			},
			{
				name:            "should not be passed when CSIDriver does not exist",
				deployDriverCRD: false,
				expectPodInfo:   false,
			},
		}
		for _, t := range tests {
			It(t.name, func() {
				deployCRD = t.deployDriverCRD
				podInfoVersion = t.podInfoOnMountVersion
				scName = "csi-mock-sc-" + f.UniqueName
				init()
				defer cleanup()

				_, _, pod := createPod()
				if pod == nil {
					return
				}
				err = framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "Failed to start pod: %v", err)
				By("Checking CSI driver logs")

				// The driver is deployed as a statefulset with stable pod names
				driverPodName := "csi-mockplugin-0"
				err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, t.expectPodInfo)
				framework.ExpectNoError(err)
			})
		}
	})
})
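For orientation, here is a minimal sketch of how a further test case could reuse the shared helpers this file introduces instead of repeating the setup inline. It is illustrative only and not part of this commit: the Context and It descriptions are invented, and the snippet assumes it is placed inside the utils.SIGDescribe("CSI Mock volumes", ...) callback above, where m, deployCRD, attachable, init, createPod and cleanup are in scope.

	// Illustrative sketch, not part of this commit: a new case reuses the
	// shared setup rather than duplicating it.
	Context("CSI example reusing the shared mock driver setup", func() {
		It("should run a pod with a dynamically provisioned mock volume", func() {
			// Configure the shared flags that init() consumes.
			deployCRD = true
			attachable = false
			init()
			defer cleanup()

			// createPod provisions the StorageClass, PVC and pause pod and
			// registers them in m so that cleanup() can delete them later.
			_, _, pod := createPod()
			if pod == nil {
				return
			}
			err := framework.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
			framework.ExpectNoError(err, "Failed to start pod: %v", err)
		})
	})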
@@ -35,8 +35,6 @@ import (
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"crypto/sha256"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/util/rand"
@@ -115,223 +113,6 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
			testTopologyNegative(config.Framework.ClientSet, suffix, config.Framework.Namespace.GetName(), true /* delayBinding */)
		})
	})

	// The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.

	Context("CSI attach test using mock driver [Feature:CSIDriverRegistry]", func() {
		var (
			err    error
			driver testsuites.TestDriver
		)

		tests := []struct {
			name             string
			driverAttachable bool
			deployDriverCRD  bool
		}{
			{
				name:             "should not require VolumeAttach for drivers without attachment",
				driverAttachable: false,
				deployDriverCRD:  true,
			},
			{
				name:             "should require VolumeAttach for drivers with attachment",
				driverAttachable: true,
				deployDriverCRD:  true,
			},
			{
				name:             "should preserve attachment policy when no CSIDriver present",
				driverAttachable: true,
				deployDriverCRD:  false,
			},
		}

		for _, t := range tests {
			test := t
			f := framework.NewDefaultFramework("csiattach")

			It(test.name, func() {
				cs := f.ClientSet
				csics := f.CSIClientSet
				ns := f.Namespace

				driver = drivers.InitMockCSIDriver(test.deployDriverCRD, test.driverAttachable, nil)
				config, testCleanup := driver.PrepareTest(f)
				driverName := config.GetUniqueDriverName()
				defer testCleanup()

				if test.deployDriverCRD {
					err = waitForCSIDriver(csics, driverName)
					framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
					defer destroyCSIDriver(csics, driverName)
				}

				By("Creating pod")
				var sc *storagev1.StorageClass
				if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
					sc = dDriver.GetDynamicProvisionStorageClass(config, "")
				}
				nodeName := config.ClientNodeName
				scTest := testsuites.StorageClassTest{
					Name:         driver.GetDriverInfo().Name,
					Provisioner:  sc.Provisioner,
					Parameters:   sc.Parameters,
					ClaimSize:    "1Gi",
					ExpectedSize: "1Gi",
				}
				nodeSelection := testsuites.NodeSelection{
					Name: nodeName,
				}
				class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name)
				if class != nil {
					defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
				}
				if claim != nil {
					// Fully delete PV before deleting CSI driver
					defer deleteVolume(cs, claim)
				}
				if pod != nil {
					// Fully delete (=unmount) the pod before deleting CSI driver
					defer framework.DeletePodWithWait(f, cs, pod)
				}
				if pod == nil {
					return
				}

				err = framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "Failed to start pod: %v", err)

				By("Checking if VolumeAttachment was created for the pod")
				handle := getVolumeHandle(cs, claim)
				attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, scTest.Provisioner, nodeName)))
				attachmentName := fmt.Sprintf("csi-%x", attachmentHash)
				_, err = cs.StorageV1beta1().VolumeAttachments().Get(attachmentName, metav1.GetOptions{})
				if err != nil {
					if errors.IsNotFound(err) {
						if test.driverAttachable {
							framework.ExpectNoError(err, "Expected VolumeAttachment but none was found")
						}
					} else {
						framework.ExpectNoError(err, "Failed to find VolumeAttachment")
					}
				}
				if !test.driverAttachable {
					Expect(err).To(HaveOccurred(), "Unexpected VolumeAttachment found")
				}
			})
		}
	})

	Context("CSI workload information using mock driver [Feature:CSIDriverRegistry]", func() {
		var (
			err            error
			driver         testsuites.TestDriver
			podInfoV1      = "v1"
			podInfoUnknown = "unknown"
			podInfoEmpty   = ""
		)

		tests := []struct {
			name                  string
			podInfoOnMountVersion *string
			deployDriverCRD       bool
			expectPodInfo         bool
		}{
			{
				name:                  "should not be passed when podInfoOnMountVersion=nil",
				podInfoOnMountVersion: nil,
				deployDriverCRD:       true,
				expectPodInfo:         false,
			},
			{
				name:                  "should be passed when podInfoOnMountVersion=v1",
				podInfoOnMountVersion: &podInfoV1,
				deployDriverCRD:       true,
				expectPodInfo:         true,
			},
			{
				name:                  "should not be passed when podInfoOnMountVersion=<empty string>",
				podInfoOnMountVersion: &podInfoEmpty,
				deployDriverCRD:       true,
				expectPodInfo:         false,
			},
			{
				name:                  "should not be passed when podInfoOnMountVersion=<unknown string>",
				podInfoOnMountVersion: &podInfoUnknown,
				deployDriverCRD:       true,
				expectPodInfo:         false,
			},
			{
				name:            "should not be passed when CSIDriver does not exist",
				deployDriverCRD: false,
				expectPodInfo:   false,
			},
		}
		for _, t := range tests {
			test := t
			f := framework.NewDefaultFramework("csiworkload")

			It(test.name, func() {
				cs := f.ClientSet
				csics := f.CSIClientSet
				ns := f.Namespace

				driver = drivers.InitMockCSIDriver(test.deployDriverCRD, true, test.podInfoOnMountVersion)
				config, testCleanup := driver.PrepareTest(f)
				driverName := config.GetUniqueDriverName()
				defer testCleanup()

				if test.deployDriverCRD {
					err = waitForCSIDriver(csics, driverName)
					framework.ExpectNoError(err, "Failed to get CSIDriver: %v", err)
					defer destroyCSIDriver(csics, driverName)
				}

				By("Creating pod")
				var sc *storagev1.StorageClass
				if dDriver, ok := driver.(testsuites.DynamicPVTestDriver); ok {
					sc = dDriver.GetDynamicProvisionStorageClass(config, "")
				}
				nodeName := config.ClientNodeName
				scTest := testsuites.StorageClassTest{
					Name:         driver.GetDriverInfo().Name,
					Parameters:   sc.Parameters,
					ClaimSize:    "1Gi",
					ExpectedSize: "1Gi",
					// Provisioner and storage class name must match what's used in
					// csi-storageclass.yaml, plus the test-specific suffix.
					Provisioner:      sc.Provisioner,
					StorageClassName: "csi-mock-sc-" + f.UniqueName,
				}
				nodeSelection := testsuites.NodeSelection{
					// The mock driver only works when everything runs on a single node.
					Name: nodeName,
				}
				class, claim, pod := startPausePod(cs, scTest, nodeSelection, ns.Name)
				if class != nil {
					defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
				}
				if claim != nil {
					// Fully delete PV before deleting CSI driver
					defer deleteVolume(cs, claim)
				}
				if pod != nil {
					// Fully delete (=unmount) the pod before deleting CSI driver
					defer framework.DeletePodWithWait(f, cs, pod)
				}
				if pod == nil {
					return
				}
				err = framework.WaitForPodNameRunningInNamespace(cs, pod.Name, pod.Namespace)
				framework.ExpectNoError(err, "Failed to start pod: %v", err)
				By("Checking CSI driver logs")
				// The driver is deployed as a statefulset with stable pod names
				driverPodName := "csi-mockplugin-0"
				err = checkPodInfo(cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo)
				framework.ExpectNoError(err)
			})
		}
	})
})

func testTopologyPositive(cs clientset.Interface, suffix, namespace string, delayBinding, allowedTopologies bool) {
@@ -434,15 +215,6 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st
	return pv.Spec.CSI.VolumeHandle
}

func deleteVolume(cs clientset.Interface, claim *v1.PersistentVolumeClaim) {
	// re-get the claim to the latest state with bound volume
	claim, err := cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
	if err == nil {
		cs.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
		framework.WaitForPersistentVolumeDeleted(cs, claim.Spec.VolumeName, framework.Poll, 2*time.Minute)
	}
}

func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node testsuites.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
	class := newStorageClass(t, ns, "")
	class, err := cs.StorageV1().StorageClasses().Create(class)
@@ -47,7 +47,6 @@ var _ = utils.SIGDescribe("Volume limits", func() {
				framework.Failf("Expected volume limits to be set")
			}
		}

	})
})