Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-09-12 20:57:20 +00:00
e2e/storage: test usage of volume in multiple pods at once
This is a special case that both kubelet and the volume driver should support, because users might expect it. One Kubernetes mechanism to deploy pods like this is via pod affinity.

However, strictly speaking the CSI spec does not allow this usage mode (see https://github.com/container-storage-interface/spec/pull/150) and there is an on-going debate to enable it (see https://github.com/container-storage-interface/spec/issues/178). Therefore this test gets skipped unless explicitly enabled for a driver.

CSI drivers which create a block device for a remote volume in NodePublishVolume fail this test. They have to make the volume available in NodeStageVolume and then in NodePublishVolume merely do a bind mount (as for example in https://github.com/kubernetes-sigs/gcp-compute-persistent-disk-csi-driver/blob/master/pkg/gce-pd-csi-driver/node.go#L150).
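The new test below is skipped unless a driver opts in via the CapMultiPODs capability checked in the diff. A minimal, hypothetical sketch of what that opt-in might look like in a driver definition; the DriverInfo and Capability identifiers come from this test framework, but the surrounding struct layout is an assumption and varies between Kubernetes releases:

	driverInfo: testsuites.DriverInfo{
		Name: "example-csi-driver",
		Capabilities: map[testsuites.Capability]bool{
			// Opt in to "should allow concurrent writes on the same node":
			// only set this if the driver really supports the same volume
			// being used by several pods on one node at the same time.
			testsuites.CapMultiPODs: true,
		},
	},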
@@ -18,6 +18,7 @@ package testsuites

import (
	"fmt"
	"sync"
	"time"

	. "github.com/onsi/ginkgo"
@@ -245,6 +246,50 @@ func testProvisioning(input *provisioningTestInput) {
		}
		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
	})

	It("should allow concurrent writes on the same node", func() {
		if !input.dInfo.Capabilities[CapMultiPODs] {
			framework.Skipf("Driver %q does not support multiple concurrent pods - skipping", input.dInfo.Name)
		}
		input.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
			// We start two pods concurrently on the same node,
			// using the same PVC. Both wait for the other to create a
			// file before returning. The pods are forced onto the
			// same node via pod affinity.
			wg := sync.WaitGroup{}
			wg.Add(2)
			firstPodName := "pvc-tester-first"
			secondPodName := "pvc-tester-second"
			run := func(podName, command string) {
				defer GinkgoRecover()
				defer wg.Done()
				node := NodeSelection{
					Name: input.nodeName,
				}
				if podName == secondPodName {
					node.Affinity = &v1.Affinity{
						PodAffinity: &v1.PodAffinity{
							RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
								{LabelSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										// Set by RunInPodWithVolume.
										"app": firstPodName,
									},
								},
									TopologyKey: "kubernetes.io/hostname",
								},
							},
						},
					}
				}
				RunInPodWithVolume(input.cs, claim.Namespace, claim.Name, podName, command, node)
			}
			go run(firstPodName, "touch /mnt/test/first && while ! [ -f /mnt/test/second ]; do sleep 1; done")
			go run(secondPodName, "touch /mnt/test/second && while ! [ -f /mnt/test/first ]; do sleep 1; done")
			wg.Wait()
		}
		TestDynamicProvisioning(input.testCase, input.cs, input.pvc, input.sc)
	})
}

// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest and storageClass
@@ -561,6 +606,9 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
		},
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: podName + "-",
			Labels: map[string]string{
				"app": podName,
			},
		},
		Spec: v1.PodSpec{
			NodeName: node.Name,
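As the commit message explains, a CSI driver passes the new test by doing the device setup once in NodeStageVolume and only bind-mounting in NodePublishVolume, as in the linked gce-pd-csi-driver code. The following is a rough sketch of that pattern, not code taken from that driver; the nodeServer type, its mounter field, and the exact import paths (e.g. k8s.io/utils/mount) are assumptions:

	package example

	import (
		"context"
		"os"

		"github.com/container-storage-interface/spec/lib/go/csi"
		"google.golang.org/grpc/codes"
		"google.golang.org/grpc/status"
		"k8s.io/utils/mount"
	)

	// nodeServer stands in for a CSI driver's node service.
	type nodeServer struct {
		mounter mount.Interface
	}

	// NodePublishVolume only bind-mounts the staging directory into the pod's
	// target path. The block device was attached, formatted and mounted once in
	// NodeStageVolume, so several pods on the same node can publish the volume
	// concurrently without mounting the device a second time.
	func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
		staging := req.GetStagingTargetPath() // prepared by NodeStageVolume
		target := req.GetTargetPath()         // per-pod publish directory

		if err := os.MkdirAll(target, 0750); err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
		if err := ns.mounter.Mount(staging, target, "", []string{"bind"}); err != nil {
			return nil, status.Error(codes.Internal, err.Error())
		}
		return &csi.NodePublishVolumeResponse{}, nil
	}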