Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 03:11:40 +00:00)
Create mock CSI driver resources in different namespace

Parent: 708261e06c
Commit: da941d8d3e
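In short, each CSI test driver's PrepareTest now creates a dedicated namespace for the driver objects and registers an idempotent cleanup action, instead of installing everything into the per-test namespace. Below is a condensed sketch of that flow, pieced together from the hunks that follow (utils.CreateDriverNamespace, deleteNamespaceFunc, tryFunc); the driver type and its manifests field are hypothetical placeholders:

func (d *exampleCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
    // Driver pods get their own namespace so that deleting the test
    // namespace does not tear the driver down mid-test.
    driverNamespace := utils.CreateDriverNamespace(f)

    cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
    cleanup, err := utils.CreateFromManifests(f, driverNamespace, nil, d.manifests...)
    framework.ExpectNoError(err, "deploying example driver")

    // Cleanup must be idempotent: it may run from defer/AfterEach and
    // again from the registered AfterSuite cleanup action.
    cleanupFunc := func() {
        framework.RemoveCleanupAction(d.cleanupHandle)
        tryFunc(deleteNamespaceFunc(f.ClientSet, f.Namespace.Name, framework.DefaultNamespaceDeletionTimeout))
        tryFunc(cleanup)
        tryFunc(cancelLogging)
        tryFunc(deleteNamespaceFunc(f.ClientSet, driverNamespace.Name, framework.DefaultNamespaceDeletionTimeout))
    }
    d.cleanupHandle = framework.AddCleanupAction(cleanupFunc)

    return &testsuites.PerTestConfig{
        Driver:          d,
        Framework:       f,
        DriverNamespace: driverNamespace,
    }, cleanupFunc
}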
@@ -244,7 +244,7 @@ OUTER:
 
 // WaitForNamespacesDeleted waits for the namespaces to be deleted.
 func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeout time.Duration) error {
-    ginkgo.By("Waiting for namespaces to vanish")
+    ginkgo.By(fmt.Sprintf("Waiting for namespaces %+v to vanish", namespaces))
     nsMap := map[string]bool{}
     for _, ns := range namespaces {
         nsMap[ns] = true
@@ -48,8 +48,6 @@ import (
     "github.com/onsi/gomega"
 )
 
-type cleanupFuncs func()
-
 const (
     csiNodeLimitUpdateTimeout  = 5 * time.Minute
     csiPodUnschedulableTimeout = 5 * time.Minute
@@ -102,7 +100,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
     type mockDriverSetup struct {
         cs           clientset.Interface
         config       *testsuites.PerTestConfig
-        testCleanups []cleanupFuncs
+        testCleanups []func()
         pods         []*v1.Pod
         pvcs         []*v1.PersistentVolumeClaim
         sc           map[string]*storagev1.StorageClass
@@ -377,7 +375,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             framework.ExpectNoError(err, "while deleting")
 
             ginkgo.By("Checking CSI driver logs")
-            err = checkPodLogs(m.cs, f.Namespace.Name, driverPodName, driverContainerName, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
+            err = checkPodLogs(m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
             framework.ExpectNoError(err)
         })
     }
@@ -700,7 +698,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
                 framework.Failf("timed out waiting for the CSI call that indicates that the pod can be deleted: %v", test.expectedCalls)
             }
             time.Sleep(1 * time.Second)
-            _, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.cs, f.Namespace.Name, driverPodName, driverContainerName)
+            _, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName)
             framework.ExpectNoError(err, "while waiting for initial CSI calls")
             if index == 0 {
                 // No CSI call received yet
@@ -724,7 +722,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
             ginkgo.By("Waiting for all remaining expected CSI calls")
             err = wait.Poll(time.Second, csiUnstageWaitTimeout, func() (done bool, err error) {
-                _, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.cs, f.Namespace.Name, driverPodName, driverContainerName)
+                _, index, err := compareCSICalls(trackedCalls, test.expectedCalls, m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName)
                 if err != nil {
                     return true, err
                 }
@@ -852,7 +850,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 
             var calls []mockCSICall
             err = wait.PollImmediateUntil(time.Second, func() (done bool, err error) {
-                c, index, err := compareCSICalls(deterministicCalls, expected, m.cs, f.Namespace.Name, driverPodName, driverContainerName)
+                c, index, err := compareCSICalls(deterministicCalls, expected, m.cs, m.config.DriverNamespace.Name, driverPodName, driverContainerName)
                 if err != nil {
                     return true, fmt.Errorf("error waiting for expected CSI calls: %s", err)
                 }
@@ -1090,7 +1088,7 @@ type mockCSICall struct {
 func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
     expectedAttributes := map[string]string{
         "csi.storage.k8s.io/pod.name":            pod.Name,
-        "csi.storage.k8s.io/pod.namespace":       namespace,
+        "csi.storage.k8s.io/pod.namespace":       pod.Namespace,
         "csi.storage.k8s.io/pod.uid":             string(pod.UID),
         "csi.storage.k8s.io/serviceAccount.name": "default",
     }
@@ -73,6 +73,7 @@ const (
 type hostpathCSIDriver struct {
     driverInfo       testsuites.DriverInfo
     manifests        []string
+    cleanupHandle    framework.CleanupActionHandle
     volumeAttributes []map[string]string
 }
 
@@ -169,8 +170,13 @@ func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *
 }
 
 func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+    // Create secondary namespace which will be used for creating driver
+    driverNamespace := utils.CreateDriverNamespace(f)
+    ns2 := driverNamespace.Name
+    ns1 := f.Namespace.Name
+
     ginkgo.By(fmt.Sprintf("deploying %s driver", h.driverInfo.Name))
-    cancelLogging := testsuites.StartPodLogs(f)
+    cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
     cs := f.ClientSet
 
     // The hostpath CSI driver only works when everything runs on the same node.
@@ -181,6 +187,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
         Prefix:              "hostpath",
         Framework:           f,
         ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
+        DriverNamespace:     driverNamespace,
     }
 
     o := utils.PatchCSIOptions{
@@ -192,19 +199,33 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per
         SnapshotterContainerName: "csi-snapshotter",
         NodeName:                 node.Name,
     }
-    cleanup, err := utils.CreateFromManifests(config.Framework, func(item interface{}) error {
+    cleanup, err := utils.CreateFromManifests(config.Framework, driverNamespace, func(item interface{}) error {
         return utils.PatchCSIDeployment(config.Framework, o, item)
-    },
-        h.manifests...)
+    }, h.manifests...)
+
     if err != nil {
         framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err)
     }
 
-    return config, func() {
-        ginkgo.By(fmt.Sprintf("uninstalling %s driver", h.driverInfo.Name))
-        cleanup()
-        cancelLogging()
+    // Cleanup CSI driver and namespaces. This function needs to be idempotent and can be
+    // concurrently called from defer (or AfterEach) and AfterSuite action hooks.
+    cleanupFunc := func() {
+        framework.RemoveCleanupAction(h.cleanupHandle)
+        ginkgo.By(fmt.Sprintf("deleting the test namespace: %s", ns1))
+        // Delete the primary namespace but its okay to fail here because this namespace will
+        // also be deleted by framework.Aftereach hook
+        tryFunc(deleteNamespaceFunc(f.ClientSet, ns1, framework.DefaultNamespaceDeletionTimeout))
+
+        ginkgo.By("uninstalling csi mock driver")
+        tryFunc(cleanup)
+        tryFunc(cancelLogging)
+
+        ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", ns2))
+        tryFunc(deleteNamespaceFunc(f.ClientSet, ns2, framework.DefaultNamespaceDeletionTimeout))
     }
+    h.cleanupHandle = framework.AddCleanupAction(cleanupFunc)
+
+    return config, cleanupFunc
 }
 
 // mockCSI
@@ -216,6 +237,7 @@ type mockCSIDriver struct {
     attachLimit         int
     enableTopology      bool
     enableNodeExpansion bool
+    cleanupHandle       framework.CleanupActionHandle
     javascriptHooks     map[string]string
 }
 
@@ -299,8 +321,13 @@ func (m *mockCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTe
 }
 
 func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
+    // Create secondary namespace which will be used for creating driver
+    driverNamespace := utils.CreateDriverNamespace(f)
+    ns2 := driverNamespace.Name
+    ns1 := f.Namespace.Name
+
     ginkgo.By("deploying csi mock driver")
-    cancelLogging := testsuites.StartPodLogs(f)
+    cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
     cs := f.ClientSet
 
     // pods should be scheduled on the node
@@ -311,6 +338,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
         Prefix:              "mock",
         Framework:           f,
         ClientNodeSelection: e2epod.NodeSelection{Name: node.Name},
+        DriverNamespace:     driverNamespace,
     }
 
     containerArgs := []string{"--name=csi-mock-" + f.UniqueName}
@@ -343,7 +371,8 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
             "hooks.yaml": string(hooksYaml),
         },
     }
-    _, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), hooks, metav1.CreateOptions{})
+
+    _, err = f.ClientSet.CoreV1().ConfigMaps(ns2).Create(context.TODO(), hooks, metav1.CreateOptions{})
     framework.ExpectNoError(err)
 
     if len(m.javascriptHooks) > 0 {
@@ -364,28 +393,46 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
             storagev1.VolumeLifecycleEphemeral,
         },
     }
-    cleanup, err := utils.CreateFromManifests(f, func(item interface{}) error {
+    cleanup, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
         return utils.PatchCSIDeployment(f, o, item)
-    },
-        m.manifests...)
+    }, m.manifests...)
+
     if err != nil {
         framework.Failf("deploying csi mock driver: %v", err)
     }
 
-    return config, func() {
+    // Cleanup CSI driver and namespaces. This function needs to be idempotent and can be
+    // concurrently called from defer (or AfterEach) and AfterSuite action hooks.
+    cleanupFunc := func() {
+        framework.RemoveCleanupAction(m.cleanupHandle)
+        ginkgo.By(fmt.Sprintf("deleting the test namespace: %s", ns1))
+        // Delete the primary namespace but its okay to fail here because this namespace will
+        // also be deleted by framework.Aftereach hook
+        tryFunc(deleteNamespaceFunc(f.ClientSet, ns1, framework.DefaultNamespaceDeletionTimeout))
+
         ginkgo.By("uninstalling csi mock driver")
-        err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), hooksConfigMapName, metav1.DeleteOptions{})
-        if err != nil {
-            framework.Logf("deleting failed: %s", err)
-        }
-        cleanup()
-        cancelLogging()
+        tryFunc(func() {
+            err := f.ClientSet.CoreV1().ConfigMaps(ns2).Delete(context.TODO(), hooksConfigMapName, metav1.DeleteOptions{})
+            if err != nil {
+                framework.Logf("deleting failed: %s", err)
+            }
+        })
+
+        tryFunc(cleanup)
+        tryFunc(cancelLogging)
+        ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", ns2))
+        tryFunc(deleteNamespaceFunc(f.ClientSet, ns2, framework.DefaultNamespaceDeletionTimeout))
     }
+
+    m.cleanupHandle = framework.AddCleanupAction(cleanupFunc)
+
+    return config, cleanupFunc
 }
 
 // gce-pd
 type gcePDCSIDriver struct {
-    driverInfo testsuites.DriverInfo
+    driverInfo    testsuites.DriverInfo
+    cleanupHandle framework.CleanupActionHandle
 }
 
 var _ testsuites.TestDriver = &gcePDCSIDriver{}
@@ -473,7 +520,12 @@ func (g *gcePDCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *uns
 
 func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTestConfig, func()) {
     ginkgo.By("deploying csi gce-pd driver")
-    cancelLogging := testsuites.StartPodLogs(f)
+    // Create secondary namespace which will be used for creating driver
+    driverNamespace := utils.CreateDriverNamespace(f)
+    ns2 := driverNamespace.Name
+    ns1 := f.Namespace.Name
+
+    cancelLogging := testsuites.StartPodLogs(f, driverNamespace)
     // It would be safer to rename the gcePD driver, but that
     // hasn't been done before either and attempts to do so now led to
     // errors during driver registration, therefore it is disabled
@@ -486,7 +538,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
     // DriverContainerName: "gce-driver",
     // ProvisionerContainerName: "csi-external-provisioner",
     // }
-    createGCESecrets(f.ClientSet, f.Namespace.Name)
+    createGCESecrets(f.ClientSet, ns2)
 
     manifests := []string{
         "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
@@ -496,7 +548,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
         "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml",
     }
 
-    cleanup, err := utils.CreateFromManifests(f, nil, manifests...)
+    cleanup, err := utils.CreateFromManifests(f, driverNamespace, nil, manifests...)
     if err != nil {
         framework.Failf("deploying csi gce-pd driver: %v", err)
     }
@@ -505,15 +557,30 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
         framework.Failf("waiting for csi driver node registration on: %v", err)
     }
 
+    // Cleanup CSI driver and namespaces. This function needs to be idempotent and can be
+    // concurrently called from defer (or AfterEach) and AfterSuite action hooks.
+    cleanupFunc := func() {
+        framework.RemoveCleanupAction(g.cleanupHandle)
+        ginkgo.By(fmt.Sprintf("deleting the test namespace: %s", ns1))
+        // Delete the primary namespace but its okay to fail here because this namespace will
+        // also be deleted by framework.Aftereach hook
+        tryFunc(deleteNamespaceFunc(f.ClientSet, ns1, framework.DefaultNamespaceDeletionTimeout))
+
+        ginkgo.By("uninstalling csi mock driver")
+        tryFunc(cleanup)
+        tryFunc(cancelLogging)
+
+        ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", ns2))
+        tryFunc(deleteNamespaceFunc(f.ClientSet, ns2, framework.DefaultNamespaceDeletionTimeout))
+    }
+    g.cleanupHandle = framework.AddCleanupAction(cleanupFunc)
+
     return &testsuites.PerTestConfig{
-        Driver:    g,
-        Prefix:    "gcepd",
-        Framework: f,
-    }, func() {
-        ginkgo.By("uninstalling gce-pd driver")
-        cleanup()
-        cancelLogging()
-    }
+        Driver:          g,
+        Prefix:          "gcepd",
+        Framework:       f,
+        DriverNamespace: driverNamespace,
+    }, cleanupFunc
 }
 
 func waitForCSIDriverRegistrationOnAllNodes(driverName string, cs clientset.Interface) error {
@@ -549,3 +616,30 @@ func waitForCSIDriverRegistrationOnNode(nodeName string, driverName string, cs c
     }
     return nil
 }
+
+func deleteNamespaceFunc(cs clientset.Interface, ns string, timeout time.Duration) func() {
+    return func() {
+        err := cs.CoreV1().Namespaces().Delete(context.TODO(), ns, metav1.DeleteOptions{})
+        if err != nil && !apierrors.IsNotFound(err) {
+            framework.Logf("error deleting namespace %s: %v", ns, err)
+        }
+        err = framework.WaitForNamespacesDeleted(cs, []string{ns}, timeout)
+        if err != nil {
+            framework.Logf("error deleting namespace %s: %v", ns, err)
+        }
+    }
+}
+
+func tryFunc(f func()) error {
+    var err error
+    if f == nil {
+        return nil
+    }
+    defer func() {
+        if recoverError := recover(); recoverError != nil {
+            err = fmt.Errorf("%v", recoverError)
+        }
+    }()
+    f()
+    return err
+}
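The tryFunc helper above keeps one failing cleanup step from aborting the steps that follow it by converting a panic into an error. A self-contained sketch of the same pattern; the step names are illustrative, and a named return value is used here so the recovered value actually reaches the caller:

package main

import "fmt"

// runStep executes one cleanup step and turns a panic into an error,
// so the remaining steps still run.
func runStep(name string, step func()) (err error) {
    defer func() {
        if r := recover(); r != nil {
            err = fmt.Errorf("%s: recovered: %v", name, r)
        }
    }()
    step()
    return nil
}

func main() {
    // A panicking step is reported but does not prevent later steps.
    if err := runStep("uninstall driver", func() { panic("driver pod already gone") }); err != nil {
        fmt.Println("cleanup step failed:", err)
    }
    if err := runStep("delete namespace", func() { fmt.Println("namespace deleted") }); err != nil {
        fmt.Println("cleanup step failed:", err)
    }
}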
test/e2e/storage/external/external.go (vendored, 2 changes)
@@ -282,7 +282,7 @@ func (d *driverDefinition) GetDynamicProvisionStorageClass(e2econfig *testsuites
     framework.ExpectNoError(err, "load storage class from %s", d.StorageClass.FromFile)
     framework.ExpectEqual(len(items), 1, "exactly one item from %s", d.StorageClass.FromFile)
 
-    err = utils.PatchItems(f, items...)
+    err = utils.PatchItems(f, f.Namespace, items...)
     framework.ExpectNoError(err, "patch items")
 
     sc, ok = items[0].(*storagev1.StorageClass)
@@ -554,10 +554,11 @@ func getSnapshot(claimName string, ns, snapshotClassName string) *unstructured.U
 //
 // The output goes to log files (when using --report-dir, as in the
 // CI) or the output stream (otherwise).
-func StartPodLogs(f *framework.Framework) func() {
+func StartPodLogs(f *framework.Framework, driverNamespace *v1.Namespace) func() {
     ctx, cancel := context.WithCancel(context.Background())
     cs := f.ClientSet
-    ns := f.Namespace
+
+    ns := driverNamespace.Name
 
     to := podlogs.LogOutput{
         StatusWriter: ginkgo.GinkgoWriter,
@@ -575,13 +576,13 @@ func StartPodLogs(f *framework.Framework) func() {
         to.LogPathPrefix = framework.TestContext.ReportDir + "/" +
             reg.ReplaceAllString(test.FullTestText, "_") + "/"
     }
-    podlogs.CopyAllLogs(ctx, cs, ns.Name, to)
+    podlogs.CopyAllLogs(ctx, cs, ns, to)
 
     // pod events are something that the framework already collects itself
     // after a failed test. Logging them live is only useful for interactive
    // debugging, not when we collect reports.
     if framework.TestContext.ReportDir == "" {
-        podlogs.WatchPods(ctx, cs, ns.Name, ginkgo.GinkgoWriter)
+        podlogs.WatchPods(ctx, cs, ns, ginkgo.GinkgoWriter)
     }
 
     return cancel
@@ -226,6 +226,9 @@ type PerTestConfig struct {
     // the configuration that then has to be used to run tests.
     // The values above are ignored for such tests.
     ServerConfig *e2evolume.TestConfig
+
+    // Some drivers run in their own namespace
+    DriverNamespace *v1.Namespace
 }
 
 // GetUniqueDriverName returns unique driver name that can be used parallelly in tests
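Because a driver may now run in its own namespace, test code that inspects driver pods reads the namespace from the config (as the csi_mock_volume.go hunks above do with m.config.DriverNamespace.Name). An illustrative helper, not part of the framework, that mirrors the fallback logic PatchNamespace uses further below:

// driverObjectNamespace returns the namespace that holds the driver objects:
// the dedicated driver namespace when one was created, else the test namespace.
func driverObjectNamespace(f *framework.Framework, config *testsuites.PerTestConfig) string {
    if config.DriverNamespace != nil {
        return config.DriverNamespace.Name
    }
    return f.Namespace.Name
}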
@@ -114,11 +114,11 @@ func visitManifests(cb func([]byte) error, files ...string) error {
 // PatchItems has some limitations:
 // - only some common items are supported, unknown ones trigger an error
 // - only the latest stable API version for each item is supported
-func PatchItems(f *framework.Framework, items ...interface{}) error {
+func PatchItems(f *framework.Framework, driverNamspace *v1.Namespace, items ...interface{}) error {
     for _, item := range items {
         // Uncomment when debugging the loading and patching of items.
         // Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
-        if err := patchItemRecursively(f, item); err != nil {
+        if err := patchItemRecursively(f, driverNamspace, item); err != nil {
             return err
         }
     }
@@ -139,7 +139,7 @@ func PatchItems(f *framework.Framework, items ...interface{}) error {
 // PatchItems has the some limitations as LoadFromManifests:
 // - only some common items are supported, unknown ones trigger an error
 // - only the latest stable API version for each item is supported
-func CreateItems(f *framework.Framework, items ...interface{}) (func(), error) {
+func CreateItems(f *framework.Framework, ns *v1.Namespace, items ...interface{}) (func(), error) {
     var destructors []func() error
     cleanup := func() {
         // TODO (?): use same logic as framework.go for determining
@@ -163,7 +163,7 @@ func CreateItems(f *framework.Framework, items ...interface{}) (func(), error) {
     // description = fmt.Sprintf("%s:\n%s", description, PrettyPrint(item))
     framework.Logf("creating %s", description)
     for _, factory := range factories {
-        destructor, err := factory.Create(f, item)
+        destructor, err := factory.Create(f, ns, item)
         if destructor != nil {
             destructors = append(destructors, func() error {
                 framework.Logf("deleting %s", description)
@@ -195,12 +195,12 @@ func CreateItems(f *framework.Framework, items ...interface{}) (func(), error) {
 // CreateFromManifests is a combination of LoadFromManifests,
 // PatchItems, patching with an optional custom function,
 // and CreateItems.
-func CreateFromManifests(f *framework.Framework, patch func(item interface{}) error, files ...string) (func(), error) {
+func CreateFromManifests(f *framework.Framework, driverNamespace *v1.Namespace, patch func(item interface{}) error, files ...string) (func(), error) {
     items, err := LoadFromManifests(files...)
     if err != nil {
         return nil, errors.Wrap(err, "CreateFromManifests")
     }
-    if err := PatchItems(f, items...); err != nil {
+    if err := PatchItems(f, driverNamespace, items...); err != nil {
         return nil, err
     }
     if patch != nil {
@@ -210,7 +210,7 @@ func CreateFromManifests(f *framework.Framework, patch func(item interface{}) er
             }
         }
     }
-    return CreateItems(f, items...)
+    return CreateItems(f, driverNamespace, items...)
 }
 
 // What is a subset of metav1.TypeMeta which (in contrast to
@@ -250,7 +250,7 @@ type ItemFactory interface {
     // error or a cleanup function for the created item.
     // If the item is of an unsupported type, it must return
     // an error that has errorItemNotSupported as cause.
-    Create(f *framework.Framework, item interface{}) (func() error, error)
+    Create(f *framework.Framework, ns *v1.Namespace, item interface{}) (func() error, error)
 }
 
 // describeItem always returns a string that describes the item,
@@ -294,16 +294,21 @@ func PatchName(f *framework.Framework, item *string) {
 // PatchNamespace moves the item into the test's namespace. Not
 // all items can be namespaced. For those, the name also needs to be
 // patched.
-func PatchNamespace(f *framework.Framework, item *string) {
+func PatchNamespace(f *framework.Framework, driverNamespace *v1.Namespace, item *string) {
+    if driverNamespace != nil {
+        *item = driverNamespace.GetName()
+        return
+    }
+
     if f.Namespace != nil {
         *item = f.Namespace.GetName()
     }
 }
 
-func patchItemRecursively(f *framework.Framework, item interface{}) error {
+func patchItemRecursively(f *framework.Framework, driverNamespace *v1.Namespace, item interface{}) error {
     switch item := item.(type) {
     case *rbacv1.Subject:
-        PatchNamespace(f, &item.Namespace)
+        PatchNamespace(f, driverNamespace, &item.Namespace)
     case *rbacv1.RoleRef:
         // TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
         // which contains all role names that are defined cluster-wide before the test starts?
@@ -315,7 +320,7 @@ func patchItemRecursively(f *framework.Framework, item interface{}) error {
     case *rbacv1.ClusterRole:
         PatchName(f, &item.Name)
     case *rbacv1.Role:
-        PatchNamespace(f, &item.Namespace)
+        PatchNamespace(f, driverNamespace, &item.Namespace)
         // Roles are namespaced, but because for RoleRef above we don't
         // know whether the referenced role is a ClusterRole or Role
         // and therefore always renames, we have to do the same here.
@@ -325,33 +330,33 @@ func patchItemRecursively(f *framework.Framework, item interface{}) error {
     case *storagev1.CSIDriver:
         PatchName(f, &item.Name)
     case *v1.ServiceAccount:
-        PatchNamespace(f, &item.ObjectMeta.Namespace)
+        PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
     case *v1.Secret:
-        PatchNamespace(f, &item.ObjectMeta.Namespace)
+        PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
     case *rbacv1.ClusterRoleBinding:
         PatchName(f, &item.Name)
         for i := range item.Subjects {
-            if err := patchItemRecursively(f, &item.Subjects[i]); err != nil {
+            if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
                 return errors.Wrapf(err, "%T", f)
             }
         }
-        if err := patchItemRecursively(f, &item.RoleRef); err != nil {
+        if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
            return errors.Wrapf(err, "%T", f)
         }
     case *rbacv1.RoleBinding:
-        PatchNamespace(f, &item.Namespace)
+        PatchNamespace(f, driverNamespace, &item.Namespace)
         for i := range item.Subjects {
-            if err := patchItemRecursively(f, &item.Subjects[i]); err != nil {
+            if err := patchItemRecursively(f, driverNamespace, &item.Subjects[i]); err != nil {
                 return errors.Wrapf(err, "%T", f)
             }
         }
-        if err := patchItemRecursively(f, &item.RoleRef); err != nil {
+        if err := patchItemRecursively(f, driverNamespace, &item.RoleRef); err != nil {
            return errors.Wrapf(err, "%T", f)
         }
     case *v1.Service:
-        PatchNamespace(f, &item.ObjectMeta.Namespace)
+        PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
     case *appsv1.StatefulSet:
-        PatchNamespace(f, &item.ObjectMeta.Namespace)
+        PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
         if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
             return err
         }
@@ -359,7 +364,7 @@ func patchItemRecursively(f *framework.Framework, item interface{}) error {
             return err
         }
     case *appsv1.DaemonSet:
-        PatchNamespace(f, &item.ObjectMeta.Namespace)
+        PatchNamespace(f, driverNamespace, &item.ObjectMeta.Namespace)
         if err := patchContainerImages(item.Spec.Template.Spec.Containers); err != nil {
             return err
         }
@@ -383,12 +388,12 @@ func (f *serviceAccountFactory) New() runtime.Object {
     return &v1.ServiceAccount{}
 }
 
-func (*serviceAccountFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*serviceAccountFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*v1.ServiceAccount)
     if !ok {
         return nil, errorItemNotSupported
     }
-    client := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.GetName())
+    client := f.ClientSet.CoreV1().ServiceAccounts(ns.Name)
     if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
         return nil, errors.Wrap(err, "create ServiceAccount")
     }
@@ -403,7 +408,7 @@ func (f *clusterRoleFactory) New() runtime.Object {
     return &rbacv1.ClusterRole{}
 }
 
-func (*clusterRoleFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*clusterRoleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*rbacv1.ClusterRole)
     if !ok {
         return nil, errorItemNotSupported
@@ -425,7 +430,7 @@ func (f *clusterRoleBindingFactory) New() runtime.Object {
     return &rbacv1.ClusterRoleBinding{}
 }
 
-func (*clusterRoleBindingFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*clusterRoleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*rbacv1.ClusterRoleBinding)
     if !ok {
         return nil, errorItemNotSupported
@@ -446,13 +451,13 @@ func (f *roleFactory) New() runtime.Object {
     return &rbacv1.Role{}
 }
 
-func (*roleFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*roleFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*rbacv1.Role)
     if !ok {
         return nil, errorItemNotSupported
     }
 
-    client := f.ClientSet.RbacV1().Roles(f.Namespace.GetName())
+    client := f.ClientSet.RbacV1().Roles(ns.Name)
     if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
         return nil, errors.Wrap(err, "create Role")
     }
@@ -467,13 +472,13 @@ func (f *roleBindingFactory) New() runtime.Object {
     return &rbacv1.RoleBinding{}
 }
 
-func (*roleBindingFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*roleBindingFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*rbacv1.RoleBinding)
     if !ok {
         return nil, errorItemNotSupported
     }
 
-    client := f.ClientSet.RbacV1().RoleBindings(f.Namespace.GetName())
+    client := f.ClientSet.RbacV1().RoleBindings(ns.Name)
     if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
         return nil, errors.Wrap(err, "create RoleBinding")
     }
@@ -488,13 +493,13 @@ func (f *serviceFactory) New() runtime.Object {
     return &v1.Service{}
 }
 
-func (*serviceFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*serviceFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*v1.Service)
     if !ok {
         return nil, errorItemNotSupported
     }
 
-    client := f.ClientSet.CoreV1().Services(f.Namespace.GetName())
+    client := f.ClientSet.CoreV1().Services(ns.Name)
     if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
         return nil, errors.Wrap(err, "create Service")
     }
@@ -509,13 +514,13 @@ func (f *statefulSetFactory) New() runtime.Object {
     return &appsv1.StatefulSet{}
 }
 
-func (*statefulSetFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*statefulSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*appsv1.StatefulSet)
     if !ok {
         return nil, errorItemNotSupported
     }
 
-    client := f.ClientSet.AppsV1().StatefulSets(f.Namespace.GetName())
+    client := f.ClientSet.AppsV1().StatefulSets(ns.Name)
     if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
         return nil, errors.Wrap(err, "create StatefulSet")
     }
@@ -530,13 +535,13 @@ func (f *daemonSetFactory) New() runtime.Object {
     return &appsv1.DaemonSet{}
 }
 
-func (*daemonSetFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*daemonSetFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*appsv1.DaemonSet)
     if !ok {
         return nil, errorItemNotSupported
     }
 
-    client := f.ClientSet.AppsV1().DaemonSets(f.Namespace.GetName())
+    client := f.ClientSet.AppsV1().DaemonSets(ns.Name)
     if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
         return nil, errors.Wrap(err, "create DaemonSet")
     }
@@ -551,7 +556,7 @@ func (f *storageClassFactory) New() runtime.Object {
     return &storagev1.StorageClass{}
 }
 
-func (*storageClassFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*storageClassFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*storagev1.StorageClass)
     if !ok {
         return nil, errorItemNotSupported
@@ -572,7 +577,7 @@ func (f *csiDriverFactory) New() runtime.Object {
     return &storagev1.CSIDriver{}
 }
 
-func (*csiDriverFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*csiDriverFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*storagev1.CSIDriver)
     if !ok {
         return nil, errorItemNotSupported
@@ -593,13 +598,13 @@ func (f *secretFactory) New() runtime.Object {
     return &v1.Secret{}
 }
 
-func (*secretFactory) Create(f *framework.Framework, i interface{}) (func() error, error) {
+func (*secretFactory) Create(f *framework.Framework, ns *v1.Namespace, i interface{}) (func() error, error) {
     item, ok := i.(*v1.Secret)
     if !ok {
         return nil, errorItemNotSupported
     }
 
-    client := f.ClientSet.CoreV1().Secrets(f.Namespace.GetName())
+    client := f.ClientSet.CoreV1().Secrets(ns.Name)
     if _, err := client.Create(context.TODO(), item, metav1.CreateOptions{}); err != nil {
         return nil, errors.Wrap(err, "create Secret")
     }
@@ -703,3 +703,22 @@ func findMountPoints(hostExec HostExec, node *v1.Node, dir string) []string {
 func FindVolumeGlobalMountPoints(hostExec HostExec, node *v1.Node) sets.String {
     return sets.NewString(findMountPoints(hostExec, node, "/var/lib/kubelet/plugins")...)
 }
+
+// CreateDriverNamespace creates a namespace for CSI driver installation.
+// The namespace is still tracked and ensured that gets deleted when test terminates.
+func CreateDriverNamespace(f *framework.Framework) *v1.Namespace {
+    ginkgo.By(fmt.Sprintf("Building a driver namespace object, basename %s", f.BaseName))
+    namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
+        "e2e-framework": f.BaseName,
+    })
+    framework.ExpectNoError(err)
+
+    if framework.TestContext.VerifyServiceAccount {
+        ginkgo.By("Waiting for a default service account to be provisioned in namespace")
+        err = framework.WaitForDefaultServiceAccountInNamespace(f.ClientSet, namespace.Name)
+        framework.ExpectNoError(err)
+    } else {
+        framework.Logf("Skipping waiting for service account")
+    }
+    return namespace
+}