Merge pull request #81960 from pohly/ephemeral-tests
ephemeral volume tests
This commit is contained in: commit 8b4fd4104d
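The PR adds end-to-end coverage for CSI inline ephemeral volumes, i.e. CSI volumes declared directly in a pod spec rather than through a PVC. As orientation for the diff below, here is a small Go sketch (not part of this change) of how such a volume is expressed with the core/v1 API; the driver name and the attribute key are placeholder values.

package sketch

import (
	v1 "k8s.io/api/core/v1"
)

// inlineCSIVolume builds a pod volume that is backed directly by a CSI driver,
// without a PersistentVolumeClaim. The attributes end up in the
// NodePublishVolume request as volume_context.
func inlineCSIVolume(name, driver string, attrs map[string]string) v1.Volume {
	return v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			CSI: &v1.CSIVolumeSource{
				Driver:           driver, // e.g. "hostpath.csi.k8s.io" (placeholder)
				VolumeAttributes: attrs,  // e.g. map[string]string{"foo": "bar"} (placeholder)
			},
		},
	}
}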
@@ -370,10 +370,14 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 				framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
 			}
 
+			ginkgo.By("Deleting the previously created pod")
+			err = e2epod.DeletePodWithWait(m.cs, pod)
+			framework.ExpectNoError(err, "while deleting")
+
 			ginkgo.By("Checking CSI driver logs")
 			// The driver is deployed as a statefulset with stable pod names
 			driverPodName := "csi-mockplugin-0"
-			err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
+			err = checkPodLogs(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
 			framework.ExpectNoError(err)
 		})
 	}
@@ -719,8 +723,9 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum
 	return cs.CoreV1().Pods(ns).Create(pod)
 }
 
-// checkPodInfo tests that NodePublish was called with expected volume_context
-func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
+// checkPodLogs tests that NodePublish was called with expected volume_context and (for ephemeral inline volumes)
+// has the matching NodeUnpublish
+func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
 	expectedAttributes := map[string]string{
 		"csi.storage.k8s.io/pod.name":      pod.Name,
 		"csi.storage.k8s.io/pod.namespace": namespace,
@@ -741,6 +746,8 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
 	// Find NodePublish in the logs
 	foundAttributes := sets.NewString()
 	logLines := strings.Split(log, "\n")
+	numNodePublishVolume := 0
+	numNodeUnpublishVolume := 0
 	for _, line := range logLines {
 		if !strings.HasPrefix(line, "gRPCCall:") {
 			continue
@@ -759,19 +766,23 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
 			e2elog.Logf("Could not parse CSI driver log line %q: %s", line, err)
 			continue
 		}
-		if call.Method != "/csi.v1.Node/NodePublishVolume" {
-			continue
-		}
-		// Check that NodePublish had expected attributes
-		for k, v := range expectedAttributes {
-			vv, found := call.Request.VolumeContext[k]
-			if found && v == vv {
-				foundAttributes.Insert(k)
-				e2elog.Logf("Found volume attribute %s: %s", k, v)
+		switch call.Method {
+		case "/csi.v1.Node/NodePublishVolume":
+			numNodePublishVolume++
+			if numNodePublishVolume == 1 {
+				// Check that NodePublish had expected attributes for first volume
+				for k, v := range expectedAttributes {
+					vv, found := call.Request.VolumeContext[k]
+					if found && v == vv {
+						foundAttributes.Insert(k)
+						e2elog.Logf("Found volume attribute %s: %s", k, v)
+					}
+				}
 			}
+		case "/csi.v1.Node/NodeUnpublishVolume":
+			e2elog.Logf("Found NodeUnpublishVolume: %+v", call)
+			numNodeUnpublishVolume++
 		}
-		// Process just the first NodePublish, the rest of the log is useless.
-		break
 	}
 	if expectPodInfo {
 		if foundAttributes.Len() != len(expectedAttributes) {
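The hunk above switches the check to counting both NodePublishVolume and NodeUnpublishVolume calls in the mock driver's log. As a compile-and-run illustration of that log-scanning idea, here is a small sketch; the exact JSON payload behind the "gRPCCall:" prefix is an assumption made for the example, and only Method and the volume context are inspected.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// mockCSICall is an assumed shape for the JSON logged after "gRPCCall:".
type mockCSICall struct {
	Method  string
	Request struct {
		VolumeContext map[string]string `json:"volume_context"`
	}
}

// countPublishCalls returns how many NodePublishVolume and NodeUnpublishVolume
// calls appear in a driver log; a mismatch would indicate a leaked ephemeral volume.
func countPublishCalls(log string) (publish, unpublish int) {
	for _, line := range strings.Split(log, "\n") {
		if !strings.HasPrefix(line, "gRPCCall:") {
			continue
		}
		var call mockCSICall
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "gRPCCall:")), &call); err != nil {
			continue // mirror the test's tolerant parsing: skip lines that do not parse
		}
		switch call.Method {
		case "/csi.v1.Node/NodePublishVolume":
			publish++
		case "/csi.v1.Node/NodeUnpublishVolume":
			unpublish++
		}
	}
	return publish, unpublish
}

func main() {
	log := `gRPCCall: {"Method":"/csi.v1.Node/NodePublishVolume","Request":{"volume_context":{"csi.storage.k8s.io/pod.name":"example"}}}`
	p, u := countPublishCalls(log)
	fmt.Println(p, u)
}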
@@ -782,6 +793,9 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
 		if foundAttributes.Len() != 0 {
 			return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List())
 		}
+	if numNodePublishVolume != numNodeUnpublishVolume {
+		return fmt.Errorf("number of NodePublishVolume %d != number of NodeUnpublishVolume %d", numNodePublishVolume, numNodeUnpublishVolume)
+	}
 	return nil
 }
 
@@ -42,6 +42,7 @@ var csiTestDrivers = []func() testsuites.TestDriver{
 
 // List of testSuites to be executed in below loop
 var csiTestSuites = []func() testsuites.TestSuite{
+	testsuites.InitEphemeralTestSuite,
 	testsuites.InitVolumesTestSuite,
 	testsuites.InitVolumeIOTestSuite,
 	testsuites.InitVolumeModeTestSuite,
@@ -62,11 +62,12 @@ const (
 
 // hostpathCSI
 type hostpathCSIDriver struct {
 	driverInfo testsuites.DriverInfo
 	manifests  []string
+	volumeAttributes []map[string]string
 }
 
-func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
+func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, volumeAttributes []map[string]string, manifests ...string) testsuites.TestDriver {
 	return &hostpathCSIDriver{
 		driverInfo: testsuites.DriverInfo{
 			Name: name,
@@ -77,13 +78,15 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
 			),
 			Capabilities: capabilities,
 		},
 		manifests: manifests,
+		volumeAttributes: volumeAttributes,
 	}
 }
 
 var _ testsuites.TestDriver = &hostpathCSIDriver{}
 var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
 var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
+var _ testsuites.EphemeralTestDriver = &hostpathCSIDriver{}
 
 // InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
 func InitHostPathCSIDriver() testsuites.TestDriver {
@@ -97,16 +100,20 @@ func InitHostPathCSIDriver() testsuites.TestDriver {
 	}
 	return initHostPathCSIDriver("csi-hostpath",
 		capabilities,
-		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
+		// Volume attributes don't matter, but we have to provide at least one map.
+		[]map[string]string{
+			{"foo": "bar"},
+		},
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-attacher.yaml",
+		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml",
+		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml",
 		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-provisioner.yaml",
-		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotter.yaml",
 		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-resizer.yaml",
-		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml",
+		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotter.yaml",
 		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml",
 	)
 }
@@ -116,6 +123,9 @@ func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
 }
 
 func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
+	if pattern.VolType == testpatterns.CSIInlineVolume && len(h.volumeAttributes) == 0 {
+		framework.Skipf("%s has no volume attributes defined, doesn't support ephemeral inline volumes", h.driverInfo.Name)
+	}
 }
 
 func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
|
|||||||
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (h *hostpathCSIDriver) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
|
||||||
|
return h.volumeAttributes[volumeNumber%len(h.volumeAttributes)], false /* not shared */, false /* read-write */
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *hostpathCSIDriver) GetCSIDriverName(config *testsuites.PerTestConfig) string {
|
||||||
|
return config.GetUniqueDriverName()
|
||||||
|
}
|
||||||
|
|
||||||
func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
|
func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
|
||||||
snapshotter := config.GetUniqueDriverName()
|
snapshotter := config.GetUniqueDriverName()
|
||||||
parameters := map[string]string{}
|
parameters := map[string]string{}
|
||||||
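With GetVolume and GetCSIDriverName in place the hostpath driver now satisfies the EphemeralTestDriver interface defined further down in testdriver.go. A trimmed, compile-only sketch of what a driver has to add for that (just the two new methods, on a hypothetical struct; the attribute sets are placeholders):

package sketch

import (
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// fakeEphemeralDriver shows only the methods a driver gains when it opts into
// the ephemeral test suite; the rest of the TestDriver interface is omitted here.
type fakeEphemeralDriver struct {
	volumeAttributes []map[string]string
}

// GetVolume hands out attribute sets round-robin; this hypothetical driver's
// volumes are neither shared between pods nor restricted to read-only mounts.
func (d *fakeEphemeralDriver) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
	return d.volumeAttributes[volumeNumber%len(d.volumeAttributes)], false /* shared */, false /* readOnly */
}

func (d *fakeEphemeralDriver) GetCSIDriverName(config *testsuites.PerTestConfig) string {
	return config.GetUniqueDriverName()
}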
@@ -205,7 +223,6 @@ var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
 // InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
 func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
 	driverManifests := []string{
-		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml",
@@ -305,7 +322,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 		NodeName:  config.ClientNodeName,
 		PodInfo:   m.podInfo,
 		CanAttach: &m.attachable,
-		VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
+		VolumeLifecycleModes: &[]storagev1beta1.VolumeLifecycleMode{
 			storagev1beta1.VolumeLifecyclePersistent,
 			storagev1beta1.VolumeLifecycleEphemeral,
 		},
@@ -329,7 +346,10 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
 func InitHostPathV0CSIDriver() testsuites.TestDriver {
 	return initHostPathCSIDriver("csi-hostpath-v0",
 		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
-		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
+		nil, /* no volume attributes -> no ephemeral volume testing */
+		// Using the current set of rbac.yaml files is problematic here because they don't
+		// match the version of the rules that were written for the releases of external-attacher
+		// and external-provisioner that we are using here. It happens to work in practice...
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml",
@@ -423,7 +443,6 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
 	createGCESecrets(f.ClientSet, f.Namespace.Name)
 
 	manifests := []string{
-		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
 		"test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml",

test/e2e/storage/external/external.go (vendored): 30 lines changed
@@ -179,14 +179,25 @@ type driverDefinition struct {
 		// TODO (?): load from file
 	}
 
-	// InlineVolumeAttributes defines one or more set of attributes for
-	// use as inline ephemeral volumes. At least one set of attributes
-	// has to be defined to enable testing of inline ephemeral volumes.
-	// If a test needs more volumes than defined, some of the defined
+	// InlineVolumes defines one or more volumes for use as inline
+	// ephemeral volumes. At least one such volume has to be
+	// defined to enable testing of inline ephemeral volumes. If
+	// a test needs more volumes than defined, some of the defined
 	// volumes will be used multiple times.
 	//
 	// DriverInfo.Name is used as name of the driver in the inline volume.
-	InlineVolumeAttributes []map[string]string
+	InlineVolumes []struct {
+		// Attributes are passed as NodePublishVolumeReq.volume_context.
+		// Can be empty.
+		Attributes map[string]string
+		// Shared defines whether the resulting volume is
+		// shared between different pods (i.e. changes made
+		// in one pod are visible in another)
+		Shared bool
+		// ReadOnly must be set to true if the driver does not
+		// support mounting as read/write.
+		ReadOnly bool
+	}
 
 	// ClaimSize defines the desired size of dynamically
 	// provisioned volumes. Default is "5GiB".
@@ -221,7 +232,7 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern)
 			supported = true
 		}
 	case testpatterns.CSIInlineVolume:
-		supported = len(d.InlineVolumeAttributes) != 0
+		supported = len(d.InlineVolumes) != 0
 	}
 	if !supported {
 		framework.Skipf("Driver %q does not support volume type %q - skipping", d.DriverInfo.Name, pattern.VolType)
@@ -294,11 +305,12 @@ func (d *driverDefinition) GetClaimSize() string {
 	return d.ClaimSize
 }
 
-func (d *driverDefinition) GetVolumeAttributes(config *testsuites.PerTestConfig, volumeNumber int) map[string]string {
-	if len(d.InlineVolumeAttributes) == 0 {
+func (d *driverDefinition) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
+	if len(d.InlineVolumes) == 0 {
 		framework.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
 	}
-	return d.InlineVolumeAttributes[volumeNumber%len(d.InlineVolumeAttributes)]
+	volume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)]
+	return volume.Attributes, volume.Shared, volume.ReadOnly
 }
 
 func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) string {
@@ -17,7 +17,9 @@ limitations under the License.
 package testsuites
 
 import (
+	"flag"
 	"fmt"
+	"strings"
 
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -30,6 +32,7 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/testpatterns"
+	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 )
 
 type ephemeralTestSuite struct {
@@ -97,8 +100,8 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns
 			Namespace:  f.Namespace.Name,
 			DriverName: eDriver.GetCSIDriverName(l.config),
 			Node:       e2epod.NodeSelection{Name: l.config.ClientNodeName},
-			GetVolumeAttributes: func(volumeNumber int) map[string]string {
-				return eDriver.GetVolumeAttributes(l.config, volumeNumber)
+			GetVolume: func(volumeNumber int) (map[string]string, bool, bool) {
+				return eDriver.GetVolume(l.config, volumeNumber)
 			},
 		}
 	}
@@ -110,10 +113,73 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns
 		}
 	}
 
-	ginkgo.It("should create inline ephemeral volume", func() {
+	ginkgo.It("should create read-only inline ephemeral volume", func() {
 		init()
 		defer cleanup()
 
+		l.testCase.ReadOnly = true
+		l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
+			storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep ro,")
+			return nil
+		}
+		l.testCase.TestEphemeral()
+	})
+
+	ginkgo.It("should create read/write inline ephemeral volume", func() {
+		init()
+		defer cleanup()
+
+		l.testCase.ReadOnly = false
+		l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
+			storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep rw,")
+			return nil
+		}
+		l.testCase.TestEphemeral()
+	})
+
+	ginkgo.It("should support two pods which share the same volume", func() {
+		init()
+		defer cleanup()
+
+		// We test in read-only mode if that is all that the driver supports,
+		// otherwise read/write.
+		_, shared, readOnly := eDriver.GetVolume(l.config, 0)
+
+		l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
+			// Create another pod with the same inline volume attributes.
+			pod2 := StartInPodWithInlineVolume(f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000",
+				[]v1.CSIVolumeSource{*pod.Spec.Volumes[0].CSI},
+				readOnly,
+				l.testCase.Node)
+			framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod2.Name, pod2.Namespace), "waiting for second pod with inline volume")
+
+			// If (and only if) we were able to mount
+			// read/write and volume data is not shared
+			// between pods, then we can check whether
+			// data written in one pod is really not
+			// visible in the other.
+			if !readOnly && !shared {
+				ginkgo.By("writing data in one pod and checking for it in the second")
+				storageutils.VerifyExecInPodSucceed(pod, "touch /mnt/test-0/hello-world")
+				storageutils.VerifyExecInPodSucceed(pod2, "[ ! -f /mnt/test-0/hello-world ]")
+			}
+
+			defer StopPod(f.ClientSet, pod2)
+			return nil
+		}
+
+		l.testCase.TestEphemeral()
+	})
+
+	var numInlineVolumes = flag.Int("storage.ephemeral."+strings.Replace(driver.GetDriverInfo().Name, ".", "-", -1)+".numInlineVolumes",
+		2, "number of ephemeral inline volumes per pod")
+
+	ginkgo.It("should support multiple inline ephemeral volumes", func() {
+		init()
+		defer cleanup()
+
+		l.testCase.NumInlineVolumes = *numInlineVolumes
+		gomega.Expect(*numInlineVolumes).To(gomega.BeNumerically(">", 0), "positive number of inline volumes")
 		l.testCase.TestEphemeral()
 	})
 }
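The last test above sizes the pod through a per-driver flag built from the driver name. A self-contained sketch of that naming scheme, using an example driver name (the helper function is hypothetical):

package main

import (
	"flag"
	"fmt"
	"strings"
)

// inlineVolumesFlag registers the flag used above: dots in the driver name are
// replaced so that e.g. "hostpath.csi.k8s.io" yields
// -storage.ephemeral.hostpath-csi-k8s-io.numInlineVolumes.
func inlineVolumesFlag(driverName string) *int {
	name := "storage.ephemeral." + strings.Replace(driverName, ".", "-", -1) + ".numInlineVolumes"
	return flag.Int(name, 2, "number of ephemeral inline volumes per pod")
}

func main() {
	n := inlineVolumesFlag("hostpath.csi.k8s.io") // example driver name
	flag.Parse()
	fmt.Println(*n)
}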
@@ -126,12 +192,18 @@ type EphemeralTest struct {
 	DriverName string
 	Node       e2epod.NodeSelection
 
-	// GetVolumeAttributes returns the volume attributes for a
+	// GetVolume returns the volume attributes for a
 	// certain inline ephemeral volume, enumerated starting with
 	// #0. Some tests might require more than one volume. They can
 	// all be the same or different, depending what the driver supports
 	// and/or wants to test.
-	GetVolumeAttributes func(volumeNumber int) map[string]string
+	//
+	// For each volume, the test driver can specify the
+	// attributes, whether two pods using those attributes will
+	// end up sharing the same backend storage (i.e. changes made
+	// in one pod will be visible in the other), and whether
+	// the volume can be mounted read/write or only read-only.
+	GetVolume func(volumeNumber int) (attributes map[string]string, shared bool, readOnly bool)
 
 	// RunningPodCheck is invoked while a pod using an inline volume is running.
 	// It can execute additional checks on the pod and its volume(s). Any data
@@ -145,28 +217,46 @@ type EphemeralTest struct {
 	// removed. How to do such a check is driver-specific and not
 	// covered by the generic storage test suite.
 	StoppedPodCheck func(nodeName string, runningPodData interface{})
+
+	// NumInlineVolumes sets the number of ephemeral inline volumes per pod.
+	// Unset (= zero) is the same as one.
+	NumInlineVolumes int
+
+	// ReadOnly limits mounting to read-only.
+	ReadOnly bool
 }
 
 // TestEphemeral tests pod creation with one ephemeral volume.
 func (t EphemeralTest) TestEphemeral() {
 	client := t.Client
 	gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
-	gomega.Expect(t.GetVolumeAttributes).NotTo(gomega.BeNil(), "EphemeralTest.GetVolumeAttributes is required")
+	gomega.Expect(t.GetVolume).NotTo(gomega.BeNil(), "EphemeralTest.GetVolume is required")
 	gomega.Expect(t.DriverName).NotTo(gomega.BeEmpty(), "EphemeralTest.DriverName is required")
 
 	ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
-	command := "mount | grep /mnt/test"
-	pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command,
-		v1.CSIVolumeSource{
+	command := "mount | grep /mnt/test && sleep 10000"
+	var csiVolumes []v1.CSIVolumeSource
+	numVolumes := t.NumInlineVolumes
+	if numVolumes == 0 {
+		numVolumes = 1
+	}
+	for i := 0; i < numVolumes; i++ {
+		attributes, _, readOnly := t.GetVolume(i)
+		csi := v1.CSIVolumeSource{
 			Driver:           t.DriverName,
-			VolumeAttributes: t.GetVolumeAttributes(0),
-		},
-		t.Node)
+			VolumeAttributes: attributes,
+		}
+		if readOnly && !t.ReadOnly {
+			framework.Skipf("inline ephemeral volume #%d is read-only, but the test needs a read/write volume", i)
+		}
+		csiVolumes = append(csiVolumes, csi)
+	}
+	pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, csiVolumes, t.ReadOnly, t.Node)
 	defer func() {
 		// pod might be nil now.
 		StopPod(client, pod)
 	}()
-	framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
+	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
 	runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
 	framework.ExpectNoError(err, "get pod")
 	actualNodeName := runningPod.Spec.NodeName
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// StartInPodWithInlineVolume starts a command in a pod with given volume mounted to /mnt/test directory.
|
// StartInPodWithInlineVolume starts a command in a pod with given volume(s) mounted to /mnt/test-<number> directory.
|
||||||
// The caller is responsible for checking the pod and deleting it.
|
// The caller is responsible for checking the pod and deleting it.
|
||||||
func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolume v1.CSIVolumeSource, node e2epod.NodeSelection) *v1.Pod {
|
func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolumes []v1.CSIVolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod {
|
||||||
pod := &v1.Pod{
|
pod := &v1.Pod{
|
||||||
TypeMeta: metav1.TypeMeta{
|
TypeMeta: metav1.TypeMeta{
|
||||||
Kind: "Pod",
|
Kind: "Pod",
|
||||||
@@ -208,26 +298,29 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 					Name:    "csi-volume-tester",
 					Image:   volume.GetTestImage(framework.BusyBoxImage),
 					Command: volume.GenerateScriptCmd(command),
-					VolumeMounts: []v1.VolumeMount{
-						{
-							Name:      "my-volume",
-							MountPath: "/mnt/test",
-						},
-					},
 				},
 			},
 			RestartPolicy: v1.RestartPolicyNever,
-			Volumes: []v1.Volume{
-				{
-					Name: "my-volume",
-					VolumeSource: v1.VolumeSource{
-						CSI: &csiVolume,
-					},
-				},
-			},
 		},
 	}
 
+	for i, csiVolume := range csiVolumes {
+		name := fmt.Sprintf("my-volume-%d", i)
+		pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
+			v1.VolumeMount{
+				Name:      name,
+				MountPath: fmt.Sprintf("/mnt/test-%d", i),
+				ReadOnly:  readOnly,
+			})
+		pod.Spec.Volumes = append(pod.Spec.Volumes,
+			v1.Volume{
+				Name: name,
+				VolumeSource: v1.VolumeSource{
+					CSI: &csiVolume,
+				},
+			})
+	}
+
 	pod, err := c.CoreV1().Pods(ns).Create(pod)
 	framework.ExpectNoError(err, "failed to create pod")
 	return pod
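StartInPodWithInlineVolume now takes a slice of CSI volume sources plus a readOnly flag and mounts them at /mnt/test-0, /mnt/test-1, and so on. A hypothetical call site (the helper function, driver name and attribute values are made up for illustration; the import paths follow this tree):

package sketch

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	"k8s.io/kubernetes/test/e2e/storage/testsuites"
)

// startTwoVolumePod shows the new call shape: two inline volumes from the same
// driver, both mounted read/write.
func startTwoVolumePod(cs clientset.Interface, ns string, node e2epod.NodeSelection) *v1.Pod {
	volumes := []v1.CSIVolumeSource{
		{Driver: "hostpath.csi.k8s.io", VolumeAttributes: map[string]string{"foo": "bar"}},
		{Driver: "hostpath.csi.k8s.io", VolumeAttributes: map[string]string{"foo": "baz"}},
	}
	return testsuites.StartInPodWithInlineVolume(cs, ns, "inline-volume-tester", "sleep 100000", volumes, false /* readOnly */, node)
}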
@@ -588,7 +588,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 	return pod
 }
 
-// StopPod first tries to log the output of the pod's container, then deletes the pod.
+// StopPod first tries to log the output of the pod's container, then deletes the pod and
+// waits for that to succeed.
 func StopPod(c clientset.Interface, pod *v1.Pod) {
 	if pod == nil {
 		return
@@ -600,6 +601,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
 		e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
 	}
 	e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
+	e2epod.WaitForPodNoLongerRunningInNamespace(c, pod.Name, pod.Namespace)
 }
 
 func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
@@ -105,12 +105,17 @@ type DynamicPVTestDriver interface {
 type EphemeralTestDriver interface {
 	TestDriver
 
-	// GetVolumeAttributes returns the volume attributes for a
-	// certain inline ephemeral volume, enumerated starting with
-	// #0. Some tests might require more than one volume. They can
-	// all be the same or different, depending what the driver supports
+	// GetVolume returns the volume attributes for a certain
+	// inline ephemeral volume, enumerated starting with #0. Some
+	// tests might require more than one volume. They can all be
+	// the same or different, depending what the driver supports
 	// and/or wants to test.
-	GetVolumeAttributes(config *PerTestConfig, volumeNumber int) map[string]string
+	//
+	// For each volume, the test driver can return volume attributes,
+	// whether the resulting volume is shared between different pods (i.e.
+	// changes made in one pod are visible in another), and whether the
+	// volume can be mounted read/write or only read-only.
+	GetVolume(config *PerTestConfig, volumeNumber int) (attributes map[string]string, shared bool, readOnly bool)
 
 	// GetCSIDriverName returns the name that was used when registering with
 	// kubelet. Depending on how the driver was deployed, this can be different
@@ -131,7 +131,9 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
 		if o.CanAttach != nil {
 			object.Spec.AttachRequired = o.CanAttach
 		}
-		object.Spec.VolumeLifecycleModes = o.VolumeLifecycleModes
+		if o.VolumeLifecycleModes != nil {
+			object.Spec.VolumeLifecycleModes = *o.VolumeLifecycleModes
+		}
 	}
 
 	return nil
@@ -171,8 +173,8 @@ type PatchCSIOptions struct {
 	// field *if* the driver deploys a CSIDriver object. Ignored
 	// otherwise.
 	CanAttach *bool
-	// The value to use for the CSIDriver.Spec.VolumeLifecycleModes
+	// If not nil, the value to use for the CSIDriver.Spec.VolumeLifecycleModes
 	// field *if* the driver deploys a CSIDriver object. Ignored
 	// otherwise.
-	VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode
+	VolumeLifecycleModes *[]storagev1beta1.VolumeLifecycleMode
 }
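PatchCSIOptions.VolumeLifecycleModes becomes a pointer so that nil means "leave the deployed CSIDriver object untouched", while a non-nil slice overwrites it, as the mock driver does above. A minimal compile-only sketch of setting the option, assuming the helper lives in test/e2e/storage/utils as in this tree:

package sketch

import (
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

func mockDriverPatchOptions() utils.PatchCSIOptions {
	attach := true
	modes := []storagev1beta1.VolumeLifecycleMode{
		storagev1beta1.VolumeLifecyclePersistent,
		storagev1beta1.VolumeLifecycleEphemeral,
	}
	return utils.PatchCSIOptions{
		CanAttach:            &attach,
		VolumeLifecycleModes: &modes, // nil here would keep whatever the manifest declares
	}
}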
@@ -1 +0,0 @@
-The original file is (or will be) https://github.com/kubernetes-csi/driver-registrar/blob/master/deploy/kubernetes/rbac.yaml
@@ -1,51 +0,0 @@
-# This YAML file contains all RBAC objects that are necessary to run external
-# CSI provisioner.
-#
-# In production, each CSI driver deployment has to be customized:
-# - to avoid conflicts, use non-default namespace and different names
-#   for non-namespaced entities like the ClusterRole
-# - decide whether the deployment replicates the external CSI
-#   provisioner, in which case leadership election must be enabled;
-#   this influences the RBAC setup, see below
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: csi-node-sa
-  # replace with non-default namespace name
-  namespace: default
-
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: driver-registrar-runner
-rules:
-  - apiGroups: [""]
-    resources: ["events"]
-    verbs: ["get", "list", "watch", "create", "update", "patch"]
-  # The following permissions are only needed when running
-  # driver-registrar without the --kubelet-registration-path
-  # parameter, i.e. when using driver-registrar instead of
-  # kubelet to update the csi.volume.kubernetes.io/nodeid
-  # annotation. That mode of operation is going to be deprecated
-  # and should not be used anymore, but is needed on older
-  # Kubernetes versions.
-  # - apiGroups: [""]
-  #   resources: ["nodes"]
-  #   verbs: ["get", "update", "patch"]
-
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: csi-driver-registrar-role
-subjects:
-  - kind: ServiceAccount
-    name: csi-node-sa
-    # replace with non-default namespace name
-    namespace: default
-roleRef:
-  kind: ClusterRole
-  name: driver-registrar-runner
-  apiGroup: rbac.authorization.k8s.io
@@ -1 +1 @@
-The original file is (or will be) https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
+The original file is https://github.com/kubernetes-csi/external-attacher/blob/<version>/deploy/kubernetes/rbac.yaml

@@ -1 +1 @@
-The original file is (or will be) https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
+The original file is https://github.com/kubernetes-csi/external-provisioner/blob/<version>/deploy/kubernetes/rbac.yaml

@@ -1 +1 @@
-The original file is (or will be) https://github.com/kubernetes-csi/external-resizer/blob/master/deploy/kubernetes/rbac.yaml
+The original file is https://github.com/kubernetes-csi/external-resizer/blob/<version>/deploy/kubernetes/rbac.yaml
@@ -36,9 +36,6 @@ rules:
   - apiGroups: [""]
     resources: ["persistentvolumeclaims/status"]
     verbs: ["update", "patch"]
-  - apiGroups: ["storage.k8s.io"]
-    resources: ["storageclasses"]
-    verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
|
|||||||
apiGroup: rbac.authorization.k8s.io
|
apiGroup: rbac.authorization.k8s.io
|
||||||
|
|
||||||
---
|
---
|
||||||
# Resizer must be able to work with leases in current namespace
|
# Resizer must be able to work with end point in current namespace
|
||||||
# if (and only if) leadership election is enabled
|
# if (and only if) leadership election is enabled
|
||||||
kind: Role
|
kind: Role
|
||||||
apiVersion: rbac.authorization.k8s.io/v1
|
apiVersion: rbac.authorization.k8s.io/v1
|
||||||
|
@@ -1 +1 @@
-The original file is https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/rbac.yaml
+The original file is https://github.com/kubernetes-csi/external-snapshotter/blob/<version>/deploy/kubernetes/rbac.yaml
@@ -20,36 +20,36 @@ metadata:
   # rename if there are conflicts
   name: external-snapshotter-runner
 rules:
   - apiGroups: [""]
     resources: ["persistentvolumes"]
     verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["persistentvolumeclaims"]
     verbs: ["get", "list", "watch", "update"]
   - apiGroups: ["storage.k8s.io"]
     resources: ["storageclasses"]
     verbs: ["get", "list", "watch"]
   - apiGroups: [""]
     resources: ["events"]
     verbs: ["list", "watch", "create", "update", "patch"]
   - apiGroups: [""]
     resources: ["secrets"]
     verbs: ["get", "list"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotclasses"]
     verbs: ["get", "list", "watch"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshotcontents"]
     verbs: ["create", "get", "list", "watch", "update", "delete"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshots"]
     verbs: ["get", "list", "watch", "update"]
   - apiGroups: ["snapshot.storage.k8s.io"]
     resources: ["volumesnapshots/status"]
     verbs: ["update"]
   - apiGroups: ["apiextensions.k8s.io"]
     resources: ["customresourcedefinitions"]
-    verbs: ["create", "list", "watch", "delete"]
+    verbs: ["create", "list", "watch", "delete", "get", "update"]
 
 ---
 kind: ClusterRoleBinding
@@ -57,12 +57,39 @@ apiVersion: rbac.authorization.k8s.io/v1
 metadata:
   name: csi-snapshotter-role
 subjects:
   - kind: ServiceAccount
     name: csi-snapshotter
     # replace with non-default namespace name
     namespace: default
 roleRef:
   kind: ClusterRole
   # change the name also here if the ClusterRole gets renamed
   name: external-snapshotter-runner
   apiGroup: rbac.authorization.k8s.io
+
+---
+kind: Role
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  namespace: default # TODO: replace with the namespace you want for your sidecar
+  name: external-snapshotter-leaderelection
+rules:
+- apiGroups: ["coordination.k8s.io"]
+  resources: ["leases"]
+  verbs: ["get", "watch", "list", "delete", "update", "create"]
+
+---
+kind: RoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: external-snapshotter-leaderelection
+  namespace: default # TODO: replace with the namespace you want for your sidecar
+subjects:
+  - kind: ServiceAccount
+    name: csi-snapshotter
+    namespace: default # TODO: replace with the namespace you want for your sidecar
+roleRef:
+  kind: Role
+  name: external-snapshotter-leaderelection
+  apiGroup: rbac.authorization.k8s.io
+
@@ -69,9 +69,6 @@ subjects:
   - kind: ServiceAccount
     name: csi-controller-sa
     namespace: default
-  - kind: ServiceAccount
-    name: csi-node-sa
-    namespace: default
 roleRef:
   kind: ClusterRole
   name: e2e-test-privileged-psp
@@ -11,7 +11,6 @@ spec:
     labels:
       app: gcp-compute-persistent-disk-csi-driver
   spec:
-    serviceAccountName: csi-node-sa
     containers:
       - name: csi-driver-registrar
         image: gcr.io/gke-release/csi-node-driver-registrar:v1.1.0-gke.0
@@ -11,7 +11,6 @@ spec:
     labels:
       app: csi-hostpathplugin
   spec:
-    serviceAccountName: csi-node-sa
     hostNetwork: true
     containers:
       - name: driver-registrar
@@ -7,9 +7,6 @@ subjects:
   - kind: ServiceAccount
     name: csi-attacher
     namespace: default
-  - kind: ServiceAccount
-    name: csi-node-sa
-    namespace: default
   - kind: ServiceAccount
     name: csi-provisioner
     namespace: default
@@ -27,20 +27,27 @@ spec:
     labels:
       app: csi-hostpath-attacher
   spec:
+    affinity:
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+            - key: app
+              operator: In
+              values:
+              - csi-hostpathplugin
+          topologyKey: kubernetes.io/hostname
     serviceAccountName: csi-attacher
     containers:
       - name: csi-attacher
         image: quay.io/k8scsi/csi-attacher:v1.2.0
         args:
           - --v=5
-          - --csi-address=$(ADDRESS)
-        env:
-          - name: ADDRESS
-            value: /csi/csi.sock
-        imagePullPolicy: Always
+          - --csi-address=/csi/csi.sock
         volumeMounts:
           - mountPath: /csi
             name: socket-dir
+
     volumes:
       - hostPath:
           path: /var/lib/kubelet/plugins/csi-hostpath
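The pod affinity added here (and to the provisioner further down) pins the sidecar onto the node where the csi-hostpathplugin pod runs, because the hostpath driver only works node-locally. Expressed with the core/v1 Go types, the same constraint looks roughly like this compile-only sketch:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// colocateWithHostpathPlugin schedules a pod onto the same node as the
// csi-hostpathplugin pod, matching the YAML affinity block above.
func colocateWithHostpathPlugin() *v1.Affinity {
	return &v1.Affinity{
		PodAffinity: &v1.PodAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
				LabelSelector: &metav1.LabelSelector{
					MatchExpressions: []metav1.LabelSelectorRequirement{{
						Key:      "app",
						Operator: metav1.LabelSelectorOpIn,
						Values:   []string{"csi-hostpathplugin"},
					}},
				},
				TopologyKey: "kubernetes.io/hostname",
			}},
		},
	}
}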
@@ -0,0 +1,10 @@
+apiVersion: storage.k8s.io/v1beta1
+kind: CSIDriver
+metadata:
+  name: hostpath.csi.k8s.io
+spec:
+  # Supports both modes, but needs pod info for that to determine the actual mode.
+  podInfoOnMount: true
+  volumeLifecycleModes:
+  - Persistent
+  - Ephemeral
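The new csi-hostpath-driverinfo.yaml above registers the driver for both lifecycle modes. For readers who prefer the API types, the same object expressed in Go might look roughly like the following sketch; field names follow storage.k8s.io/v1beta1 as used elsewhere in this diff.

package sketch

import (
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// csiHostpathDriverInfo mirrors the manifest: pod info on mount plus both
// volume lifecycle modes.
func csiHostpathDriverInfo() *storagev1beta1.CSIDriver {
	podInfo := true
	return &storagev1beta1.CSIDriver{
		ObjectMeta: metav1.ObjectMeta{Name: "hostpath.csi.k8s.io"},
		Spec: storagev1beta1.CSIDriverSpec{
			PodInfoOnMount: &podInfo,
			VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
				storagev1beta1.VolumeLifecyclePersistent,
				storagev1beta1.VolumeLifecycleEphemeral,
			},
		},
	}
}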
@@ -1,8 +1,31 @@
-kind: DaemonSet
+# Service defined here, plus serviceName below in StatefulSet,
+# are needed only because of condition explained in
+# https://github.com/kubernetes/kubernetes/issues/69608
+
+kind: Service
+apiVersion: v1
+metadata:
+  name: csi-hostpathplugin
+  labels:
+    app: csi-hostpathplugin
+spec:
+  selector:
+    app: csi-hostpathplugin
+  ports:
+  - name: dummy
+    port: 12345
+---
+kind: StatefulSet
 apiVersion: apps/v1
 metadata:
   name: csi-hostpathplugin
 spec:
+  serviceName: "csi-hostpathplugin"
+  # One replica only:
+  # Host path driver only works when everything runs
+  # on a single node. We achieve that by starting it once and then
+  # co-locate all other pods via inter-pod affinity
+  replicas: 1
   selector:
     matchLabels:
       app: csi-hostpathplugin
|
|||||||
labels:
|
labels:
|
||||||
app: csi-hostpathplugin
|
app: csi-hostpathplugin
|
||||||
spec:
|
spec:
|
||||||
serviceAccountName: csi-node-sa
|
|
||||||
hostNetwork: true
|
hostNetwork: true
|
||||||
containers:
|
containers:
|
||||||
- name: driver-registrar
|
- name: node-driver-registrar
|
||||||
image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
|
image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
|
||||||
|
lifecycle:
|
||||||
|
preStop:
|
||||||
|
exec:
|
||||||
|
command: ["/bin/sh", "-c", "rm -rf /registration/csi-hostpath /registration/csi-hostpath-reg.sock"]
|
||||||
args:
|
args:
|
||||||
- --v=5
|
- --v=5
|
||||||
- --csi-address=/csi/csi.sock
|
- --csi-address=/csi/csi.sock
|
||||||
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
|
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
|
||||||
|
securityContext:
|
||||||
|
privileged: true
|
||||||
env:
|
env:
|
||||||
- name: KUBE_NODE_NAME
|
- name: KUBE_NODE_NAME
|
||||||
valueFrom:
|
valueFrom:
|
||||||
fieldRef:
|
fieldRef:
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
fieldPath: spec.nodeName
|
fieldPath: spec.nodeName
|
||||||
imagePullPolicy: Always
|
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- mountPath: /csi
|
- mountPath: /csi
|
||||||
name: socket-dir
|
name: socket-dir
|
||||||
- mountPath: /registration
|
- mountPath: /registration
|
||||||
name: registration-dir
|
name: registration-dir
|
||||||
|
- mountPath: /csi-data-dir
|
||||||
|
name: csi-data-dir
|
||||||
|
|
||||||
- name: hostpath
|
- name: hostpath
|
||||||
image: quay.io/k8scsi/hostpathplugin:v1.2.0-rc5
|
image: quay.io/k8scsi/hostpathplugin:v1.2.0-rc5
|
||||||
args:
|
args:
|
||||||
|
- "--drivername=hostpath.csi.k8s.io"
|
||||||
- "--v=5"
|
- "--v=5"
|
||||||
- "--endpoint=$(CSI_ENDPOINT)"
|
- "--endpoint=$(CSI_ENDPOINT)"
|
||||||
- "--nodeid=$(KUBE_NODE_NAME)"
|
- "--nodeid=$(KUBE_NODE_NAME)"
|
||||||
@ -46,9 +77,20 @@ spec:
|
|||||||
fieldRef:
|
fieldRef:
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
fieldPath: spec.nodeName
|
fieldPath: spec.nodeName
|
||||||
imagePullPolicy: Always
|
|
||||||
securityContext:
|
securityContext:
|
||||||
privileged: true
|
privileged: true
|
||||||
|
ports:
|
||||||
|
- containerPort: 9898
|
||||||
|
name: healthz
|
||||||
|
protocol: TCP
|
||||||
|
livenessProbe:
|
||||||
|
failureThreshold: 5
|
||||||
|
httpGet:
|
||||||
|
path: /healthz
|
||||||
|
port: healthz
|
||||||
|
initialDelaySeconds: 10
|
||||||
|
timeoutSeconds: 3
|
||||||
|
periodSeconds: 2
|
||||||
volumeMounts:
|
volumeMounts:
|
||||||
- mountPath: /csi
|
- mountPath: /csi
|
||||||
name: socket-dir
|
name: socket-dir
|
||||||
@ -58,6 +100,19 @@ spec:
|
|||||||
- mountPath: /var/lib/kubelet/plugins
|
- mountPath: /var/lib/kubelet/plugins
|
||||||
mountPropagation: Bidirectional
|
mountPropagation: Bidirectional
|
||||||
name: plugins-dir
|
name: plugins-dir
|
||||||
|
- mountPath: /csi-data-dir
|
||||||
|
name: csi-data-dir
|
||||||
|
|
||||||
|
- name: liveness-probe
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: /csi
|
||||||
|
name: socket-dir
|
||||||
|
image: quay.io/k8scsi/livenessprobe:v1.1.0
|
||||||
|
args:
|
||||||
|
- --csi-address=/csi/csi.sock
|
||||||
|
- --connection-timeout=3s
|
||||||
|
- --health-port=9898
|
||||||
|
|
||||||
volumes:
|
volumes:
|
||||||
- hostPath:
|
- hostPath:
|
||||||
path: /var/lib/kubelet/plugins/csi-hostpath
|
path: /var/lib/kubelet/plugins/csi-hostpath
|
||||||
@ -75,3 +130,9 @@ spec:
|
|||||||
path: /var/lib/kubelet/plugins
|
path: /var/lib/kubelet/plugins
|
||||||
type: Directory
|
type: Directory
|
||||||
name: plugins-dir
|
name: plugins-dir
|
||||||
|
- hostPath:
|
||||||
|
# 'path' is where PV data is persisted on host.
|
||||||
|
# using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot
|
||||||
|
path: /var/lib/csi-hostpath-data/
|
||||||
|
type: DirectoryOrCreate
|
||||||
|
name: csi-data-dir
|
@ -1,12 +1,12 @@
|
|||||||
kind: Service
|
kind: Service
|
||||||
apiVersion: v1
|
apiVersion: v1
|
||||||
metadata:
|
metadata:
|
||||||
name: csi-hostpath-provisioner
|
name: csi-hostpath-provisioner
|
||||||
labels:
|
labels:
|
||||||
app: csi-hostpath-provisioner
|
app: csi-hostpath-provisioner
|
||||||
spec:
|
spec:
|
||||||
selector:
|
selector:
|
||||||
app: csi-hostpath-provisioner
|
app: csi-hostpath-provisioner
|
||||||
ports:
|
ports:
|
||||||
- name: dummy
|
- name: dummy
|
||||||
port: 12345
|
port: 12345
|
||||||
@@ -27,19 +27,25 @@ spec:
     labels:
       app: csi-hostpath-provisioner
   spec:
+    affinity:
+      podAffinity:
+        requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+            - key: app
+              operator: In
+              values:
+              - csi-hostpathplugin
+          topologyKey: kubernetes.io/hostname
     serviceAccountName: csi-provisioner
     containers:
       - name: csi-provisioner
         # TODO: replace with official 1.4.0 release when ready
         image: quay.io/k8scsi/csi-provisioner:v1.4.0-rc1
         args:
-          - "--provisioner=csi-hostpath"
-          - "--csi-address=$(ADDRESS)"
-          - "--connection-timeout=15s"
-        env:
-          - name: ADDRESS
-            value: /csi/csi.sock
-        imagePullPolicy: Always
+          - -v=5
+          - --csi-address=/csi/csi.sock
+          - --connection-timeout=15s
         volumeMounts:
           - mountPath: /csi
             name: socket-dir
@@ -7,9 +7,6 @@ subjects:
   - kind: ServiceAccount
     name: csi-attacher
     namespace: default
-  - kind: ServiceAccount
-    name: csi-node-sa
-    namespace: default
   - kind: ServiceAccount
     name: csi-provisioner
     namespace: default
@@ -1,12 +0,0 @@
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1
-metadata:
-  name: psp-csi-mock-role
-subjects:
-- kind: ServiceAccount
-  name: csi-driver-registrar
-  namespace: default
-roleRef:
-  kind: ClusterRole
-  name: e2e-test-privileged-psp
-  apiGroup: rbac.authorization.k8s.io