e2e/storage: test usage of volume in multiple pods at once

This is a special case that both kubelet and the volume driver should
support, because users might expect it. One Kubernetes mechanism for
deploying pods like this is pod affinity.
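
One way to express that in Go (a minimal sketch using the Kubernetes API
types; the "app" label key, the peer pod name and the sameNodeAffinity
helper are assumptions made up for this example - the new test below builds
the same structure inline):

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// sameNodeAffinity returns an affinity that forces a pod onto the same
// node as an already scheduled pod carrying the label app=<peer>.
func sameNodeAffinity(peer string) *v1.Affinity {
	return &v1.Affinity{
		PodAffinity: &v1.PodAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"app": peer},
					},
					// "Same topology" here means: same node (hostname).
					TopologyKey: "kubernetes.io/hostname",
				},
			},
		},
	}
}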

However, strictly speaking the CSI spec does not allow this usage
mode (see https://github.com/container-storage-interface/spec/pull/150) and
there is an ongoing debate about enabling it (see
https://github.com/container-storage-interface/spec/issues/178). Therefore
this test is skipped unless explicitly enabled for a driver.

CSI drivers which create a block device for a remote volume in
NodePublishVolume fail this test. They have to make the volume
available in NodeStageVolume and then merely do a bind mount in
NodePublishVolume (as done, for example, in
https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/pkg/gce-pd-csi-driver/node.go#L150).
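
Roughly, that split looks like this (a sketch only, assuming the standard
CSI Go bindings; attachAndMountDevice is a made-up placeholder for the
driver-specific work, and this is not the actual hostpath or GCE PD driver
code): NodeStageVolume does the expensive per-node setup once, and
NodePublishVolume merely bind-mounts the staged path for every pod, which
is what allows several pods on one node to share the volume.

package driver

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"
	"golang.org/x/sys/unix"
)

type nodeServer struct{}

// attachAndMountDevice is a hypothetical helper: attach the remote block
// device for volumeID, create a filesystem if necessary, and mount it at path.
func attachAndMountDevice(volumeID, path string) error {
	// Driver-specific work goes here.
	return nil
}

// NodeStageVolume makes the volume available exactly once per node.
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
	if err := attachAndMountDevice(req.GetVolumeId(), req.GetStagingTargetPath()); err != nil {
		return nil, err
	}
	return &csi.NodeStageVolumeResponse{}, nil
}

// NodePublishVolume only bind-mounts the staged directory into the pod's
// target path, so any number of pods on the node can use the volume.
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
	if err := unix.Mount(req.GetStagingTargetPath(), req.GetTargetPath(), "", unix.MS_BIND, ""); err != nil {
		return nil, err
	}
	return &csi.NodePublishVolumeResponse{}, nil
}
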
Patrick Ohly 2018-12-12 19:48:24 +01:00
parent ca42cf4993
commit 03d352f7aa
3 changed files with 57 additions and 2 deletions


@@ -84,7 +84,7 @@ var _ testsuites.SnapshottableTestDriver = &hostpathCSIDriver{}
// InitHostPathCSIDriver returns hostpathCSIDriver that implements TestDriver interface
func InitHostPathCSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
	return initHostPathCSIDriver("csi-hostpath", config,
-		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true},
+		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapDataSource: true, testsuites.CapMultiPODs: true},
		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
@@ -259,7 +259,7 @@ func (m *mockCSIDriver) CleanupDriver() {
// InitHostPathV0CSIDriver returns a variant of hostpathCSIDriver with different manifests.
func InitHostPathV0CSIDriver(config testsuites.TestConfig) testsuites.TestDriver {
	return initHostPathCSIDriver("csi-hostpath-v0", config,
-		map[testsuites.Capability]bool{testsuites.CapPersistence: true},
+		map[testsuites.Capability]bool{testsuites.CapPersistence: true, testsuites.CapMultiPODs: true},
		"test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
		"test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
		"test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",


@@ -18,6 +18,7 @@ package testsuites
import (
	"fmt"
	"sync"
	"time"

	. "github.com/onsi/ginkgo"
@@ -245,6 +246,50 @@ func testProvisioning(input *provisioningTestInput) {
		}
		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
	})
It("should allow concurrent writes on the same node", func() {
if !input.dInfo.Capabilities[CapMultiPODs] {
framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
}
input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
// We start two pods concurrently on the same node,
// using the same PVC. Both wait for other to create a
// file before returning. The pods are forced onto the
// same node via pod affinity.
wg := sync.WaitGroup{}
wg.Add(2)
firstPodName := "pvc-tester-first"
secondPodName := "pvc-tester-second"
run := func(podName, command string) {
defer GinkgoRecover()
defer wg.Done()
node := NodeSelection{
Name: input.nodeName,
}
if podName == secondPodName {
node.Affinity = &v1.Affinity{
PodAffinity: &v1.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
{LabelSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{
// Set by RunInPodWithVolume.
"app": firstPodName,
},
},
TopologyKey: "kubernetes.io/hostname",
},
},
},
}
}
RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
}
go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
wg.Wait()
}
TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
})
}
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
@@ -561,6 +606,9 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName + "-",
			Labels: map[string]string{
				"app": podName,
			},
		},
		Spec: v1.PodSpec{
			NodeName: node.Name,


@@ -97,6 +97,13 @@ const (
	CapFsGroup Capability = "fsGroup" // volume ownership via fsGroup
	CapExec Capability = "exec" // exec a file in the volume
	CapDataSource Capability = "dataSource" // support populate data from snapshot

	// multiple pods on a node can use the same volume concurrently;
	// for CSI, see:
	// - https://github.com/container-storage-interface/spec/pull/150
	// - https://github.com/container-storage-interface/spec/issues/178
	// - NodeStageVolume in the spec
	CapMultiPODs Capability = "multipods"
)
// DriverInfo represents a combination of parameters to be used in implementation of TestDriver