Merge pull request #81960 from pohly/ephemeral-tests

ephemeral volume tests
Kubernetes Prow Robot 2019-08-28 12:02:07 -07:00 committed by GitHub
commit 8b4fd4104d
26 changed files with 390 additions and 209 deletions

View File

@ -370,10 +370,14 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
}
ginkgo.By("Deleting the previously created pod")
err = e2epod.DeletePodWithWait(m.cs, pod)
framework.ExpectNoError(err, "while deleting")
ginkgo.By("Checking CSI driver logs")
// The driver is deployed as a statefulset with stable pod names
driverPodName := "csi-mockplugin-0"
err = checkPodInfo(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
err = checkPodLogs(m.cs, f.Namespace.Name, driverPodName, "mock", pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled)
framework.ExpectNoError(err)
})
}
@ -719,8 +723,9 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum
return cs.CoreV1().Pods(ns).Create(pod)
}
// checkPodInfo tests that NodePublish was called with expected volume_context
func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
// checkPodLogs tests that NodePublish was called with the expected volume_context and (for ephemeral inline volumes)
// that it has a matching NodeUnpublish
func checkPodLogs(cs clientset.Interface, namespace, driverPodName, driverContainerName string, pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled bool) error {
expectedAttributes := map[string]string{
"csi.storage.k8s.io/pod.name": pod.Name,
"csi.storage.k8s.io/pod.namespace": namespace,
@ -741,6 +746,8 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
// Find NodePublish in the logs
foundAttributes := sets.NewString()
logLines := strings.Split(log, "\n")
numNodePublishVolume := 0
numNodeUnpublishVolume := 0
for _, line := range logLines {
if !strings.HasPrefix(line, "gRPCCall:") {
continue
@ -759,19 +766,23 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
e2elog.Logf("Could not parse CSI driver log line %q: %s", line, err)
continue
}
if call.Method != "/csi.v1.Node/NodePublishVolume" {
continue
}
// Check that NodePublish had expected attributes
for k, v := range expectedAttributes {
vv, found := call.Request.VolumeContext[k]
if found && v == vv {
foundAttributes.Insert(k)
e2elog.Logf("Found volume attribute %s: %s", k, v)
switch call.Method {
case "/csi.v1.Node/NodePublishVolume":
numNodePublishVolume++
if numNodePublishVolume == 1 {
// Check that NodePublish had expected attributes for first volume
for k, v := range expectedAttributes {
vv, found := call.Request.VolumeContext[k]
if found && v == vv {
foundAttributes.Insert(k)
e2elog.Logf("Found volume attribute %s: %s", k, v)
}
}
}
case "/csi.v1.Node/NodeUnpublishVolume":
e2elog.Logf("Found NodeUnpublishVolume: %+v", call)
numNodeUnpublishVolume++
}
// Process just the first NodePublish, the rest of the log is useless.
break
}
if expectPodInfo {
if foundAttributes.Len() != len(expectedAttributes) {
@ -782,6 +793,9 @@ func checkPodInfo(cs clientset.Interface, namespace, driverPodName, driverContai
if foundAttributes.Len() != 0 {
return fmt.Errorf("some unexpected volume attributes were found: %+v", foundAttributes.List())
}
if numNodePublishVolume != numNodeUnpublishVolume {
return fmt.Errorf("number of NodePublishVolume %d != number of NodeUnpublishVolume %d", numNodePublishVolume, numNodeUnpublishVolume)
}
return nil
}
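
To make the format that checkPodLogs scans easier to follow: the mock CSI driver logs one line per gRPC call, consisting of the prefix "gRPCCall:" followed by a JSON object describing the call. The following minimal sketch is not from this commit; it uses a simplified csiCall struct as a stand-in for the test's actual type and only reproduces the publish/unpublish counting, not the attribute checks.

// Simplified, self-contained sketch of the log-scanning pattern above.
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// csiCall is a stand-in for the structure the real test decodes log lines into;
// only the fields needed for counting are modeled here.
type csiCall struct {
	Method  string
	Request struct {
		VolumeContext map[string]string `json:"volume_context"`
	}
}

func countPublishCalls(log string) (publish, unpublish int) {
	for _, line := range strings.Split(log, "\n") {
		if !strings.HasPrefix(line, "gRPCCall:") {
			continue
		}
		var call csiCall
		if err := json.Unmarshal([]byte(strings.TrimPrefix(line, "gRPCCall:")), &call); err != nil {
			// Tolerate unparsable lines, as the real check does.
			continue
		}
		switch call.Method {
		case "/csi.v1.Node/NodePublishVolume":
			publish++
		case "/csi.v1.Node/NodeUnpublishVolume":
			unpublish++
		}
	}
	return
}

func main() {
	// Hypothetical log excerpt in the format described above.
	log := `gRPCCall: {"Method":"/csi.v1.Node/NodePublishVolume","Request":{"volume_context":{"csi.storage.k8s.io/pod.name":"pod-0"}}}
gRPCCall: {"Method":"/csi.v1.Node/NodeUnpublishVolume","Request":{}}`
	publish, unpublish := countPublishCalls(log)
	if publish != unpublish {
		fmt.Printf("mismatch: %d NodePublishVolume vs %d NodeUnpublishVolume\n", publish, unpublish)
	} else {
		fmt.Printf("balanced: %d publish/unpublish pair(s)\n", publish)
	}
}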

View File

@ -42,6 +42,7 @@ var csiTestDrivers = []func() testsuites.TestDriver{
// List of testSuites to be executed in below loop
var csiTestSuites = []func() testsuites.TestSuite{
testsuites.InitEphemeralTestSuite,
testsuites.InitVolumesTestSuite,
testsuites.InitVolumeIOTestSuite,
testsuites.InitVolumeModeTestSuite,

View File

@ -62,11 +62,12 @@ const (
// hostpathCSI
type hostpathCSIDriver struct {
driverInfo testsuites.DriverInfo
manifests []string
driverInfo testsuites.DriverInfo
manifests []string
volumeAttributes []map[string]string
}
func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, manifests ...string) testsuites.TestDriver {
func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]bool, volumeAttributes []map[string]string, manifests ...string) testsuites.TestDriver {
return &hostpathCSIDriver{
driverInfo: testsuites.DriverInfo{
Name: name,
@ -77,13 +78,15 @@ func initHostPathCSIDriver(name string, capabilities map[testsuites.Capability]b
),
Capabilities: capabilities,
},
manifests: manifests,
manifests: manifests,
volumeAttributes: volumeAttributes,
}
}
var _ testsuites.TestDriver = &hostpathCSIDriver{}
var _ testsuites.DynamicPVTestDriver = &hostpathCSIDriver{}
var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
var _ testsuites.EphemeralTestDriver = &hostpathCSIDriver{}
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver() testsuites.TestDriver {
@ -97,16 +100,20 @@ func InitHostPathCSIDriver() testsuites.TestDriver {
}
return initHostPathCSIDriver("csi-hostpath",
capabilities,
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
// Volume attributes don't matter, but we have to provide at least one map.
[]map[string]string{
{"foo": "bar"},
},
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-snapshotter/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-attacher.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-provisioner.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotter.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-resizer.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-snapshotter.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml",
)
}
@ -116,6 +123,9 @@ func (h *hostpathCSIDriver) GetDriverInfo() *testsuites.DriverInfo {
}
func (h *hostpathCSIDriver) SkipUnsupportedTest(pattern testpatterns.TestPattern) {
if pattern.VolType == testpatterns.CSIInlineVolume && len(h.volumeAttributes) == 0 {
framework.Skipf("%s has no volume attributes defined, doesn't support ephemeral inline volumes", h.driverInfo.Name)
}
}
func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.PerTestConfig, fsType string) *storagev1.StorageClass {
@ -127,6 +137,14 @@ func (h *hostpathCSIDriver) GetDynamicProvisionStorageClass(config *testsuites.P
return testsuites.GetStorageClass(provisioner, parameters, nil, ns, suffix)
}
func (h *hostpathCSIDriver) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
return h.volumeAttributes[volumeNumber%len(h.volumeAttributes)], false /* not shared */, false /* read-write */
}
func (h *hostpathCSIDriver) GetCSIDriverName(config *testsuites.PerTestConfig) string {
return config.GetUniqueDriverName()
}
func (h *hostpathCSIDriver) GetSnapshotClass(config *testsuites.PerTestConfig) *unstructured.Unstructured {
snapshotter := config.GetUniqueDriverName()
parameters := map[string]string{}
@ -205,7 +223,6 @@ var _ testsuites.DynamicPVTestDriver = &mockCSIDriver{}
// InitMockCSIDriver returns a mockCSIDriver that implements TestDriver interface
func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
driverManifests := []string{
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml",
@ -305,7 +322,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
NodeName: config.ClientNodeName,
PodInfo: m.podInfo,
CanAttach: &m.attachable,
VolumeLifecycleModes: []storagev1beta1.VolumeLifecycleMode{
VolumeLifecycleModes: &[]storagev1beta1.VolumeLifecycleMode{
storagev1beta1.VolumeLifecyclePersistent,
storagev1beta1.VolumeLifecycleEphemeral,
},
@ -329,7 +346,10 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
func InitHostPathV0CSIDriver() testsuites.TestDriver {
return initHostPathCSIDriver("csi-hostpath-v0",
map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
nil, /* no volume attributes -> no ephemeral volume testing */
// Using the current set of rbac.yaml files is problematic here because they don't
// match the rules that were written for the releases of external-attacher
// and external-provisioner that are used here. It happens to work in practice...
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/hostpath/hostpath-v0/csi-hostpath-attacher.yaml",
@ -423,7 +443,6 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes
createGCESecrets(f.ClientSet, f.Namespace.Name)
manifests := []string{
"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
"test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml",

View File

@ -179,14 +179,25 @@ type driverDefinition struct {
// TODO (?): load from file
}
// InlineVolumeAttributes defines one or more set of attributes for
// use as inline ephemeral volumes. At least one set of attributes
// has to be defined to enable testing of inline ephemeral volumes.
// If a test needs more volumes than defined, some of the defined
// InlineVolumes defines one or more volumes for use as inline
// ephemeral volumes. At least one such volume has to be
// defined to enable testing of inline ephemeral volumes. If
// a test needs more volumes than defined, some of the defined
// volumes will be used multiple times.
//
// DriverInfo.Name is used as name of the driver in the inline volume.
InlineVolumeAttributes []map[string]string
InlineVolumes []struct {
// Attributes are passed as NodePublishVolumeReq.volume_context.
// Can be empty.
Attributes map[string]string
// Shared defines whether the resulting volume is
// shared between different pods (i.e. changes made
// in one pod are visible in another)
Shared bool
// ReadOnly must be set to true if the driver does not
// support mounting as read/write.
ReadOnly bool
}
// ClaimSize defines the desired size of dynamically
// provisioned volumes. Default is "5GiB".
@ -221,7 +232,7 @@ func (d *driverDefinition) SkipUnsupportedTest(pattern testpatterns.TestPattern)
supported = true
}
case testpatterns.CSIInlineVolume:
supported = len(d.InlineVolumeAttributes) != 0
supported = len(d.InlineVolumes) != 0
}
if !supported {
framework.Skipf("Driver %q does not support volume type %q - skipping", d.DriverInfo.Name, pattern.VolType)
@ -294,11 +305,12 @@ func (d *driverDefinition) GetClaimSize() string {
return d.ClaimSize
}
func (d *driverDefinition) GetVolumeAttributes(config *testsuites.PerTestConfig, volumeNumber int) map[string]string {
if len(d.InlineVolumeAttributes) == 0 {
func (d *driverDefinition) GetVolume(config *testsuites.PerTestConfig, volumeNumber int) (map[string]string, bool, bool) {
if len(d.InlineVolumes) == 0 {
framework.Skipf("%s does not have any InlineVolumeAttributes defined", d.DriverInfo.Name)
}
return d.InlineVolumeAttributes[volumeNumber%len(d.InlineVolumeAttributes)]
volume := d.InlineVolumes[volumeNumber%len(d.InlineVolumes)]
return volume.Attributes, volume.Shared, volume.ReadOnly
}
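
For illustration of the new InlineVolumes field and the modulo selection used by GetVolume above, here is a small self-contained sketch; it is not from this commit, the attribute values are made up, and the inlineVolume struct merely mirrors the fields documented above.

// Hypothetical stand-in for the InlineVolumes entries of a driverDefinition,
// demonstrating how GetVolume cycles through them with volumeNumber % len(...).
package main

import "fmt"

type inlineVolume struct {
	Attributes map[string]string
	Shared     bool
	ReadOnly   bool
}

func main() {
	inlineVolumes := []inlineVolume{
		{Attributes: map[string]string{"size": "1Gi"}, Shared: false, ReadOnly: false},
		{Attributes: map[string]string{"size": "2Gi"}, Shared: true, ReadOnly: true},
	}
	// Volume numbers beyond the defined entries reuse them, as documented above.
	for volumeNumber := 0; volumeNumber < 4; volumeNumber++ {
		v := inlineVolumes[volumeNumber%len(inlineVolumes)]
		fmt.Printf("volume #%d -> attributes=%v shared=%v readOnly=%v\n",
			volumeNumber, v.Attributes, v.Shared, v.ReadOnly)
	}
}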
func (d *driverDefinition) GetCSIDriverName(config *testsuites.PerTestConfig) string {

View File

@ -17,7 +17,9 @@ limitations under the License.
package testsuites
import (
"flag"
"fmt"
"strings"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -30,6 +32,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/volume"
"k8s.io/kubernetes/test/e2e/storage/testpatterns"
storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
)
type ephemeralTestSuite struct {
@ -97,8 +100,8 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns
Namespace: f.Namespace.Name,
DriverName: eDriver.GetCSIDriverName(l.config),
Node: e2epod.NodeSelection{Name: l.config.ClientNodeName},
GetVolumeAttributes: func(volumeNumber int) map[string]string {
return eDriver.GetVolumeAttributes(l.config, volumeNumber)
GetVolume: func(volumeNumber int) (map[string]string, bool, bool) {
return eDriver.GetVolume(l.config, volumeNumber)
},
}
}
@ -110,10 +113,73 @@ func (p *ephemeralTestSuite) defineTests(driver TestDriver, pattern testpatterns
}
}
ginkgo.It("should create inline ephemeral volume", func() {
ginkgo.It("should create read-only inline ephemeral volume", func() {
init()
defer cleanup()
l.testCase.ReadOnly = true
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep ro,")
return nil
}
l.testCase.TestEphemeral()
})
ginkgo.It("should create read/write inline ephemeral volume", func() {
init()
defer cleanup()
l.testCase.ReadOnly = false
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
storageutils.VerifyExecInPodSucceed(pod, "mount | grep /mnt/test | grep rw,")
return nil
}
l.testCase.TestEphemeral()
})
ginkgo.It("should support two pods which share the same volume", func() {
init()
defer cleanup()
// We test in read-only mode if that is all that the driver supports,
// otherwise read/write.
_, shared, readOnly := eDriver.GetVolume(l.config, 0)
l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
// Create another pod with the same inline volume attributes.
pod2 := StartInPodWithInlineVolume(f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000",
[]v1.CSIVolumeSource{*pod.Spec.Volumes[0].CSI},
readOnly,
l.testCase.Node)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod2.Name, pod2.Namespace), "waiting for second pod with inline volume")
// If (and only if) we were able to mount
// read/write and volume data is not shared
// between pods, then we can check whether
// data written in one pod is really not
// visible in the other.
if !readOnly && !shared {
ginkgo.By("writing data in one pod and checking for it in the second")
storageutils.VerifyExecInPodSucceed(pod, "touch /mnt/test-0/hello-world")
storageutils.VerifyExecInPodSucceed(pod2, "[ ! -f /mnt/test-0/hello-world ]")
}
defer StopPod(f.ClientSet, pod2)
return nil
}
l.testCase.TestEphemeral()
})
var numInlineVolumes = flag.Int("storage.ephemeral."+strings.Replace(driver.GetDriverInfo().Name, ".", "-", -1)+".numInlineVolumes",
2, "number of ephemeral inline volumes per pod")
ginkgo.It("should support multiple inline ephemeral volumes", func() {
init()
defer cleanup()
l.testCase.NumInlineVolumes = *numInlineVolumes
gomega.Expect(*numInlineVolumes).To(gomega.BeNumerically(">", 0), "positive number of inline volumes")
l.testCase.TestEphemeral()
})
}
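
The numInlineVolumes flag registered above gets a per-driver name, with dots in the driver name replaced by dashes. A small sketch of that naming rule (the driver names below are only examples):

// Sketch of the flag-name construction used above:
// "storage.ephemeral." + driver name with "." replaced by "-" + ".numInlineVolumes".
package main

import (
	"fmt"
	"strings"
)

func inlineVolumesFlagName(driverName string) string {
	return "storage.ephemeral." + strings.Replace(driverName, ".", "-", -1) + ".numInlineVolumes"
}

func main() {
	for _, name := range []string{"csi-hostpath", "hostpath.csi.k8s.io"} {
		fmt.Println(inlineVolumesFlagName(name)) // e.g. storage.ephemeral.csi-hostpath.numInlineVolumes
	}
}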
@ -126,12 +192,18 @@ type EphemeralTest struct {
DriverName string
Node e2epod.NodeSelection
// GetVolumeAttributes returns the volume attributes for a
// GetVolume returns the volume attributes for a
// certain inline ephemeral volume, enumerated starting with
// #0. Some tests might require more than one volume. They can
// all be the same or different, depending on what the driver supports
// and/or wants to test.
GetVolumeAttributes func(volumeNumber int) map[string]string
//
// For each volume, the test driver can specify the
// attributes, whether two pods using those attributes will
// end up sharing the same backend storage (i.e. changes made
// in one pod will be visible in the other), and whether
// the volume can be mounted read/write or only read-only.
GetVolume func(volumeNumber int) (attributes map[string]string, shared bool, readOnly bool)
// RunningPodCheck is invoked while a pod using an inline volume is running.
// It can execute additional checks on the pod and its volume(s). Any data
@ -145,28 +217,46 @@ type EphemeralTest struct {
// removed. How to do such a check is driver-specific and not
// covered by the generic storage test suite.
StoppedPodCheck func(nodeName string, runningPodData interface{})
// NumInlineVolumes sets the number of ephemeral inline volumes per pod.
// Unset (= zero) is the same as one.
NumInlineVolumes int
// ReadOnly limits mounting to read-only.
ReadOnly bool
}
// TestEphemeral tests pod creation with one ephemeral volume.
func (t EphemeralTest) TestEphemeral() {
client := t.Client
gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
gomega.Expect(t.GetVolumeAttributes).NotTo(gomega.BeNil(), "EphemeralTest.GetVolumeAttributes is required")
gomega.Expect(t.GetVolume).NotTo(gomega.BeNil(), "EphemeralTest.GetVolume is required")
gomega.Expect(t.DriverName).NotTo(gomega.BeEmpty(), "EphemeralTest.DriverName is required")
ginkgo.By(fmt.Sprintf("checking the requested inline volume exists in the pod running on node %+v", t.Node))
command := "mount | grep /mnt/test"
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command,
v1.CSIVolumeSource{
command := "mount | grep /mnt/test && sleep 10000"
var csiVolumes []v1.CSIVolumeSource
numVolumes := t.NumInlineVolumes
if numVolumes == 0 {
numVolumes = 1
}
for i := 0; i < numVolumes; i++ {
attributes, _, readOnly := t.GetVolume(i)
csi := v1.CSIVolumeSource{
Driver: t.DriverName,
VolumeAttributes: t.GetVolumeAttributes(0),
},
t.Node)
VolumeAttributes: attributes,
}
if readOnly && !t.ReadOnly {
framework.Skipf("inline ephemeral volume #%d is read-only, but the test needs a read/write volume", i)
}
csiVolumes = append(csiVolumes, csi)
}
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, csiVolumes, t.ReadOnly, t.Node)
defer func() {
// pod might be nil now.
StopPod(client, pod)
}()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@ -185,9 +275,9 @@ func (t EphemeralTest) TestEphemeral() {
}
}
// StartInPodWithInlineVolume starts a command in a pod with given volume mounted to /mnt/test directory.
// StartInPodWithInlineVolume starts a command in a pod with given volume(s) mounted to /mnt/test-<number> directory.
// The caller is responsible for checking the pod and deleting it.
func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolume v1.CSIVolumeSource, node e2epod.NodeSelection) *v1.Pod {
func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, csiVolumes []v1.CSIVolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -208,26 +298,29 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
Name: "csi-volume-tester",
Image: volume.GetTestImage(framework.BusyBoxImage),
Command: volume.GenerateScriptCmd(command),
VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: "my-volume",
VolumeSource: v1.VolumeSource{
CSI: &csiVolume,
},
},
},
},
}
for i, csiVolume := range csiVolumes {
name := fmt.Sprintf("my-volume-%d", i)
pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
v1.VolumeMount{
Name: name,
MountPath: fmt.Sprintf("/mnt/test-%d", i),
ReadOnly: readOnly,
})
pod.Spec.Volumes = append(pod.Spec.Volumes,
v1.Volume{
Name: name,
VolumeSource: v1.VolumeSource{
CSI: &csiVolume,
},
})
}
pod, err := c.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err, "failed to create pod")
return pod
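
A hedged usage sketch of the updated helpers, not part of this commit: it assumes it lives in the same testsuites package as StartInPodWithInlineVolume and StopPod, and the driver name and volume attributes are placeholders.

// Illustrative only: start a pod with one inline CSI volume, wait for it to run,
// and clean it up with StopPod. Assumes the testsuites package context.
package testsuites

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func startAndStopInlineVolumePod(cs clientset.Interface, ns string, node e2epod.NodeSelection) {
	csiVolumes := []v1.CSIVolumeSource{
		{
			Driver:           "hostpath.csi.k8s.io", // placeholder driver name
			VolumeAttributes: map[string]string{"foo": "bar"},
		},
	}
	pod := StartInPodWithInlineVolume(cs, ns, "inline-volume-demo",
		"mount | grep /mnt/test && sleep 10000",
		csiVolumes, false /* read-write */, node)
	defer StopPod(cs, pod)

	// The pod keeps running because of the sleep, so wait for Running, not Success.
	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(cs, pod.Name, pod.Namespace),
		"waiting for pod with inline volume")
}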

View File

@ -588,7 +588,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
return pod
}
// StopPod first tries to log the output of the pod's container, then deletes the pod.
// StopPod first tries to log the output of the pod's container, then deletes the pod and
// waits for that to succeed.
func StopPod(c clientset.Interface, pod *v1.Pod) {
if pod == nil {
return
@ -600,6 +601,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
e2elog.Logf("Pod %s has the following logs: %s", pod.Name, body)
}
e2epod.DeletePodOrFail(c, pod.Namespace, pod.Name)
e2epod.WaitForPodNoLongerRunningInNamespace(c, pod.Name, pod.Namespace)
}
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {

View File

@ -105,12 +105,17 @@ type DynamicPVTestDriver interface {
type EphemeralTestDriver interface {
TestDriver
// GetVolumeAttributes returns the volume attributes for a
// certain inline ephemeral volume, enumerated starting with
// #0. Some tests might require more than one volume. They can
// all be the same or different, depending what the driver supports
// GetVolume returns the volume attributes for a certain
// inline ephemeral volume, enumerated starting with #0. Some
// tests might require more than one volume. They can all be
// the same or different, depending on what the driver supports
// and/or wants to test.
GetVolumeAttributes(config *PerTestConfig, volumeNumber int) map[string]string
//
// For each volume, the test driver can return volume attributes,
// whether the resulting volume is shared between different pods (i.e.
// changes made in one pod are visible in another), and whether the
// volume can be mounted read/write or only read-only.
GetVolume(config *PerTestConfig, volumeNumber int) (attributes map[string]string, shared bool, readOnly bool)
// GetCSIDriverName returns the name that was used when registering with
// kubelet. Depending on how the driver was deployed, this can be different

View File

@ -131,7 +131,9 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
if o.CanAttach != nil {
object.Spec.AttachRequired = o.CanAttach
}
object.Spec.VolumeLifecycleModes = o.VolumeLifecycleModes
if o.VolumeLifecycleModes != nil {
object.Spec.VolumeLifecycleModes = *o.VolumeLifecycleModes
}
}
return nil
@ -171,8 +173,8 @@ type PatchCSIOptions struct {
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
CanAttach *bool
// The value to use for the CSIDriver.Spec.VolumeLifecycleModes
// If not nil, the value to use for the CSIDriver.Spec.VolumeLifecycleModes
// field *if* the driver deploys a CSIDriver object. Ignored
// otherwise.
VolumeLifecycleModes []storagev1beta1.VolumeLifecycleMode
VolumeLifecycleModes *[]storagev1beta1.VolumeLifecycleMode
}
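
The switch to a pointer type makes "unset" distinguishable from "empty". A hedged sketch of the difference, not from this commit: the function name is illustrative and the snippet assumes it is in the same package as PatchCSIOptions (callers elsewhere would add a package qualifier).

// Illustrative only: nil leaves the deployed CSIDriver.Spec.VolumeLifecycleModes
// untouched, a non-nil pointer overwrites it, matching PatchCSIDeployment above.
func exampleVolumeLifecycleModeOptions() (withModes, withoutModes PatchCSIOptions) {
	withModes = PatchCSIOptions{
		VolumeLifecycleModes: &[]storagev1beta1.VolumeLifecycleMode{
			storagev1beta1.VolumeLifecyclePersistent,
			storagev1beta1.VolumeLifecycleEphemeral,
		},
	}
	// For drivers whose manifest already sets the modes (or predates the field),
	// leave VolumeLifecycleModes nil and PatchCSIDeployment will not touch it.
	withoutModes = PatchCSIOptions{}
	return
}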

View File

@ -1 +0,0 @@
The original file is (or will be) https://github.com/kubernetes-csi/driver-registrar/blob/master/deploy/kubernetes/rbac.yaml

View File

@ -1,51 +0,0 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# provisioner, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-node-sa
# replace with non-default namespace name
namespace: default
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: driver-registrar-runner
rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "create", "update", "patch"]
# The following permissions are only needed when running
# driver-registrar without the --kubelet-registration-path
# parameter, i.e. when using driver-registrar instead of
# kubelet to update the csi.volume.kubernetes.io/nodeid
# annotation. That mode of operation is going to be deprecated
# and should not be used anymore, but is needed on older
# Kubernetes versions.
# - apiGroups: [""]
# resources: ["nodes"]
# verbs: ["get", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-driver-registrar-role
subjects:
- kind: ServiceAccount
name: csi-node-sa
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
name: driver-registrar-runner
apiGroup: rbac.authorization.k8s.io

View File

@ -1 +1 @@
The original file is (or will be) https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
The original file is https://github.com/kubernetes-csi/external-attacher/blob/<version>/deploy/kubernetes/rbac.yaml

View File

@ -1 +1 @@
The original file is (or will be) https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
The original file is https://github.com/kubernetes-csi/external-provisioner/blob/<version>/deploy/kubernetes/rbac.yaml

View File

@ -1 +1 @@
The original file is (or will be) https://github.com/kubernetes-csi/external-resizer/blob/master/deploy/kubernetes/rbac.yaml
The original file is https://github.com/kubernetes-csi/external-resizer/blob/<version>/deploy/kubernetes/rbac.yaml

View File

@ -36,9 +36,6 @@ rules:
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["update", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
@ -59,7 +56,7 @@ roleRef:
apiGroup: rbac.authorization.k8s.io
---
# Resizer must be able to work with leases in current namespace
# Resizer must be able to work with endpoints in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1

View File

@ -1 +1 @@
The original file is https://github.com/kubernetes-csi/external-snapshotter/blob/master/deploy/kubernetes/rbac.yaml
The original file is https://github.com/kubernetes-csi/external-snapshotter/blob/<version>/deploy/kubernetes/rbac.yaml

View File

@ -20,36 +20,36 @@ metadata:
# rename if there are conflicts
name: external-snapshotter-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete", "get", "update"]
---
kind: ClusterRoleBinding
@ -57,12 +57,39 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-role
subjects:
- kind: ServiceAccount
name: csi-snapshotter
# replace with non-default namespace name
namespace: default
- kind: ServiceAccount
name: csi-snapshotter
# replace with non-default namespace name
namespace: default
roleRef:
kind: ClusterRole
# change the name also here if the ClusterRole gets renamed
name: external-snapshotter-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: default # TODO: replace with the namespace you want for your sidecar
name: external-snapshotter-leaderelection
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-snapshotter-leaderelection
namespace: default # TODO: replace with the namespace you want for your sidecar
subjects:
- kind: ServiceAccount
name: csi-snapshotter
namespace: default # TODO: replace with the namespace you want for your sidecar
roleRef:
kind: Role
name: external-snapshotter-leaderelection
apiGroup: rbac.authorization.k8s.io

View File

@ -69,9 +69,6 @@ subjects:
- kind: ServiceAccount
name: csi-controller-sa
namespace: default
- kind: ServiceAccount
name: csi-node-sa
namespace: default
roleRef:
kind: ClusterRole
name: e2e-test-privileged-psp

View File

@ -11,7 +11,6 @@ spec:
labels:
app: gcp-compute-persistent-disk-csi-driver
spec:
serviceAccountName: csi-node-sa
containers:
- name: csi-driver-registrar
image: gcr.io/gke-release/csi-node-driver-registrar:v1.1.0-gke.0

View File

@ -11,7 +11,6 @@ spec:
labels:
app: csi-hostpathplugin
spec:
serviceAccountName: csi-node-sa
hostNetwork: true
containers:
- name: driver-registrar

View File

@ -7,9 +7,6 @@ subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
- kind: ServiceAccount
name: csi-node-sa
namespace: default
- kind: ServiceAccount
name: csi-provisioner
namespace: default

View File

@ -27,20 +27,27 @@ spec:
labels:
app: csi-hostpath-attacher
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-hostpathplugin
topologyKey: kubernetes.io/hostname
serviceAccountName: csi-attacher
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v1.2.0
args:
- --v=5
- --csi-address=$(ADDRESS)
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: Always
- --csi-address=/csi/csi.sock
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath

View File

@ -0,0 +1,10 @@
apiVersion: storage.k8s.io/v1beta1
kind: CSIDriver
metadata:
name: hostpath.csi.k8s.io
spec:
# Supports both modes, but needs pod info to determine the actual mode.
podInfoOnMount: true
volumeLifecycleModes:
- Persistent
- Ephemeral

View File

@ -1,8 +1,31 @@
kind: DaemonSet
# The Service defined here, plus the serviceName below in the StatefulSet,
# are needed only because of the condition explained in
# https://github.com/kubernetes/kubernetes/issues/69608
kind: Service
apiVersion: v1
metadata:
name: csi-hostpathplugin
labels:
app: csi-hostpathplugin
spec:
selector:
app: csi-hostpathplugin
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpathplugin
spec:
serviceName: "csi-hostpathplugin"
# One replica only:
# The hostpath driver only works when everything runs
# on a single node. We achieve that by starting it once and then
# co-locating all other pods via inter-pod affinity
replicas: 1
selector:
matchLabels:
app: csi-hostpathplugin
@ -11,30 +34,38 @@ spec:
labels:
app: csi-hostpathplugin
spec:
serviceAccountName: csi-node-sa
hostNetwork: true
containers:
- name: driver-registrar
- name: node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0
lifecycle:
preStop:
exec:
command: ["/bin/sh", "-c", "rm -rf /registration/csi-hostpath /registration/csi-hostpath-reg.sock"]
args:
- --v=5
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
securityContext:
privileged: true
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
imagePullPolicy: Always
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /csi-data-dir
name: csi-data-dir
- name: hostpath
image: quay.io/k8scsi/hostpathplugin:v1.2.0-rc5
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
@ -46,9 +77,20 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
imagePullPolicy: Always
securityContext:
privileged: true
ports:
- containerPort: 9898
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 2
volumeMounts:
- mountPath: /csi
name: socket-dir
@ -58,6 +100,19 @@ spec:
- mountPath: /var/lib/kubelet/plugins
mountPropagation: Bidirectional
name: plugins-dir
- mountPath: /csi-data-dir
name: csi-data-dir
- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: quay.io/k8scsi/livenessprobe:v1.1.0
args:
- --csi-address=/csi/csi.sock
- --connection-timeout=3s
- --health-port=9898
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
@ -75,3 +130,9 @@ spec:
path: /var/lib/kubelet/plugins
type: Directory
name: plugins-dir
- hostPath:
# 'path' is where PV data is persisted on host.
# using /tmp is also possible, though the PVs will not be available after plugin container recreation or host reboot
path: /var/lib/csi-hostpath-data/
type: DirectoryOrCreate
name: csi-data-dir

View File

@ -1,12 +1,12 @@
kind: Service
apiVersion: v1
metadata:
name: csi-hostpath-provisioner
name: csi-hostpath-provisioner
labels:
app: csi-hostpath-provisioner
app: csi-hostpath-provisioner
spec:
selector:
app: csi-hostpath-provisioner
app: csi-hostpath-provisioner
ports:
- name: dummy
port: 12345
@ -27,19 +27,25 @@ spec:
labels:
app: csi-hostpath-provisioner
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-hostpathplugin
topologyKey: kubernetes.io/hostname
serviceAccountName: csi-provisioner
containers:
- name: csi-provisioner
# TODO: replace with official 1.4.0 release when ready
image: quay.io/k8scsi/csi-provisioner:v1.4.0-rc1
args:
- "--provisioner=csi-hostpath"
- "--csi-address=$(ADDRESS)"
- "--connection-timeout=15s"
env:
- name: ADDRESS
value: /csi/csi.sock
imagePullPolicy: Always
- -v=5
- --csi-address=/csi/csi.sock
- --connection-timeout=15s
volumeMounts:
- mountPath: /csi
name: socket-dir

View File

@ -7,9 +7,6 @@ subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: default
- kind: ServiceAccount
name: csi-node-sa
namespace: default
- kind: ServiceAccount
name: csi-provisioner
namespace: default

View File

@ -1,12 +0,0 @@
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: psp-csi-mock-role
subjects:
- kind: ServiceAccount
name: csi-driver-registrar
namespace: default
roleRef:
kind: ClusterRole
name: e2e-test-privileged-psp
apiGroup: rbac.authorization.k8s.io