e2e test for ReadWriteOncePod PV access mode

Adds two tests for the enforcement of the ReadWriteOncePod
PersistentVolume access mode.

1. Tests that when two Pods referencing the same ReadWriteOncePod PVC
   are scheduled, the later-scheduled Pod is marked unschedulable
   because the PVC is in use.
2. Tests that when two Pods are scheduled onto the same node (setting
   Pod.Spec.NodeName to bypass scheduling for the second Pod), the
   second Pod fails to start because the PVC is already mounted on
   the node.
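
Both tests use a PVC whose only access mode is ReadWriteOncePod. For
reference, such a claim looks roughly like the following sketch (the
claim name and storage class are illustrative placeholders, not part
of this commit):

    apiVersion: v1
    kind: PersistentVolumeClaim
    metadata:
      name: example-pvc                  # illustrative name
    spec:
      accessModes:
        - ReadWriteOncePod               # at most one pod may use the volume
      resources:
        requests:
          storage: 1Gi
      storageClassName: csi-hostpath-sc  # assumed; any RWOP-capable CSI class works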

Included are changes updating the hostpath CSI driver to accept the
new CSI access modes. Its sidecar containers are already at versions
that support ReadWriteOncePod and don't need updating. The GCE PD CSI
driver does not yet support the new CSI access modes, but its sidecar
containers are at supported versions, so the feature will still work.

To support ReadWriteOncePod, the following CSI sidecars must be updated
to these versions or greater:
 - csi-provisioner:v3.0.0+
 - csi-attacher:v3.3.0+
 - csi-resizer:v1.3.0+
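
In a CSI driver deployment, these requirements map onto the sidecar
container images, roughly as sketched below (illustrative only, not
part of this commit; the registry.k8s.io images are the conventional
builds, and any build at these versions or newer works):

    containers:
      - name: csi-provisioner
        image: registry.k8s.io/sig-storage/csi-provisioner:v3.0.0
      - name: csi-attacher
        image: registry.k8s.io/sig-storage/csi-attacher:v3.3.0
      - name: csi-resizer
        image: registry.k8s.io/sig-storage/csi-resizer:v1.3.0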

For more details, see:
https://github.com/kubernetes/enhancements/blob/master/keps/sig-storage/2485-read-write-once-pod-pv-access-mode/README.md
Chris Henzie, 2022-09-30 09:39:40 -07:00
commit f6d9c27722 (parent be506dc46e)
6 changed files with 239 additions and 4 deletions


@@ -148,6 +148,7 @@ func InitHostPathCSIDriver() storageframework.TestDriver {
 			storageframework.CapOfflineExpansion: true,
 			storageframework.CapOnlineExpansion:  true,
 			storageframework.CapSingleNodeVolume: true,
+			storageframework.CapReadWriteOncePod: true,
 			// This is needed for the
 			// testsuites/volumelimits.go `should support volume limits`
@@ -805,6 +806,7 @@ func InitGcePDCSIDriver() storageframework.TestDriver {
 			storageframework.CapOnlineExpansion:    true,
 			storageframework.CapNodeExpansion:      true,
 			storageframework.CapSnapshotDataSource: true,
+			storageframework.CapReadWriteOncePod:   true,
 		},
 		RequiredAccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
 		TopologyKeys:        []string{GCEPDCSIZoneTopologyKey},


@@ -183,6 +183,13 @@ const (
 	// for dynamic provisioning exists, the driver is expected to provide
 	// capacity information for it.
 	CapCapacity Capability = "capacity"
+
+	// To support ReadWriteOncePod, the following CSI sidecars must be
+	// updated to these versions or greater:
+	// - csi-provisioner:v3.0.0+
+	// - csi-attacher:v3.3.0+
+	// - csi-resizer:v1.3.0+
+	CapReadWriteOncePod Capability = "readWriteOncePod"
 )
 
 // DriverInfo represents static information about a TestDriver.


@@ -53,6 +53,11 @@ type VolumeResource struct {
 // CreateVolumeResource constructs a VolumeResource for the current test. It knows how to deal with
 // different test pattern volume types.
 func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange) *VolumeResource {
+	return CreateVolumeResourceWithAccessModes(driver, config, pattern, testVolumeSizeRange, driver.GetDriverInfo().RequiredAccessModes)
+}
+
+// CreateVolumeResourceWithAccessModes constructs a VolumeResource for the current test with the provided access modes.
+func CreateVolumeResourceWithAccessModes(driver TestDriver, config *PerTestConfig, pattern TestPattern, testVolumeSizeRange e2evolume.SizeRange, accessModes []v1.PersistentVolumeAccessMode) *VolumeResource {
 	r := VolumeResource{
 		Config:  config,
 		Pattern: pattern,
@@ -75,7 +80,7 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern Test
 	if pDriver, ok := driver.(PreprovisionedPVTestDriver); ok {
 		pvSource, volumeNodeAffinity := pDriver.GetPersistentVolumeSource(false, pattern.FsType, r.Volume)
 		if pvSource != nil {
-			r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, dInfo.RequiredAccessModes)
+			r.Pv, r.Pvc = createPVCPV(f, dInfo.Name, pvSource, volumeNodeAffinity, pattern.VolMode, accessModes)
 			r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */)
 		}
 	}
@@ -102,13 +107,13 @@ func CreateVolumeResource(driver TestDriver, config *PerTestConfig, pattern Test
 		switch pattern.VolType {
 		case DynamicPV:
 			r.Pv, r.Pvc = createPVCPVFromDynamicProvisionSC(
-				f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, dInfo.RequiredAccessModes)
+				f, dInfo.Name, claimSize, r.Sc, pattern.VolMode, accessModes)
 			r.VolSource = storageutils.CreateVolumeSource(r.Pvc.Name, false /* readOnly */)
 		case GenericEphemeralVolume:
 			driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
 			claimSize, err := storageutils.GetSizeRangesIntersection(testVolumeSizeRange, driverVolumeSizeRange)
 			framework.ExpectNoError(err, "determine intersection of test size range %+v and driver size range %+v", testVolumeSizeRange, driverVolumeSizeRange)
-			r.VolSource = createEphemeralVolumeSource(r.Sc.Name, pattern.VolMode, dInfo.RequiredAccessModes, claimSize)
+			r.VolSource = createEphemeralVolumeSource(r.Sc.Name, pattern.VolMode, accessModes, claimSize)
 		}
 	}
 case CSIInlineVolume:


@@ -81,6 +81,7 @@ var CSISuites = append(BaseSuites,
 	InitSnapshottableTestSuite,
 	InitSnapshottableStressTestSuite,
 	InitVolumePerformanceTestSuite,
+	InitReadWriteOncePodTestSuite,
 )
 
 func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) opCounts {


@@ -0,0 +1,220 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testsuites

import (
	"context"

	"github.com/onsi/ginkgo/v2"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	errors "k8s.io/apimachinery/pkg/util/errors"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/kubelet/events"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
	admissionapi "k8s.io/pod-security-admission/api"
)

type readWriteOncePodTestSuite struct {
	tsInfo storageframework.TestSuiteInfo
}

var _ storageframework.TestSuite = &readWriteOncePodTestSuite{}

type readWriteOncePodTest struct {
	config        *storageframework.PerTestConfig
	driverCleanup func()

	cs     clientset.Interface
	volume *storageframework.VolumeResource
	pods   []*v1.Pod

	migrationCheck *migrationOpCheck
}

func InitCustomReadWriteOncePodTestSuite(patterns []storageframework.TestPattern) storageframework.TestSuite {
	return &readWriteOncePodTestSuite{
		tsInfo: storageframework.TestSuiteInfo{
			Name:         "read-write-once-pod",
			TestPatterns: patterns,
			FeatureTag:   "[Feature:ReadWriteOncePod]",
		},
	}
}

// InitReadWriteOncePodTestSuite returns a test suite for the ReadWriteOncePod PersistentVolume access mode feature.
func InitReadWriteOncePodTestSuite() storageframework.TestSuite {
	// Only covers one test pattern since ReadWriteOncePod enforcement is
	// handled through Kubernetes and does not differ across volume types.
	patterns := []storageframework.TestPattern{storageframework.DefaultFsDynamicPV}
	return InitCustomReadWriteOncePodTestSuite(patterns)
}

func (t *readWriteOncePodTestSuite) GetTestSuiteInfo() storageframework.TestSuiteInfo {
	return t.tsInfo
}

func (t *readWriteOncePodTestSuite) SkipUnsupportedTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	driverInfo := driver.GetDriverInfo()
	if !driverInfo.Capabilities[storageframework.CapReadWriteOncePod] {
		e2eskipper.Skipf("Driver %q doesn't support ReadWriteOncePod - skipping", driverInfo.Name)
	}
}

func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
	var (
		driverInfo = driver.GetDriverInfo()
		l          readWriteOncePodTest
	)

	// Beware that it also registers an AfterEach which renders f unusable. Any code using
	// f must run inside an It or Context callback.
	f := framework.NewFrameworkWithCustomTimeouts("read-write-once-pod", storageframework.GetDriverTimeouts(driver))
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

	init := func() {
		l = readWriteOncePodTest{}
		l.config, l.driverCleanup = driver.PrepareTest(f)
		l.cs = f.ClientSet
		l.pods = []*v1.Pod{}
		l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driverInfo.InTreePluginName)
	}

	cleanup := func() {
		var errs []error

		for _, pod := range l.pods {
			framework.Logf("Deleting pod %v", pod.Name)
			err := e2epod.DeletePodWithWait(l.cs, pod)
			errs = append(errs, err)
		}

		framework.Logf("Deleting volume %s", l.volume.Pvc.GetName())
		err := l.volume.CleanupResource()
		errs = append(errs, err)

		errs = append(errs, storageutils.TryFunc(l.driverCleanup))
		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
		l.migrationCheck.validateMigrationVolumeOpCounts()
	}

	ginkgo.BeforeEach(func() {
		init()
		ginkgo.DeferCleanup(cleanup)
	})

	ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume", func() {
		// Create the ReadWriteOncePod PVC.
		accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}
		l.volume = storageframework.CreateVolumeResourceWithAccessModes(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes)

		podConfig := e2epod.Config{
			NS:           f.Namespace.Name,
			PVCs:         []*v1.PersistentVolumeClaim{l.volume.Pvc},
			SeLinuxLabel: e2epv.SELinuxLabel,
		}

		// Create the first pod, which will take ownership of the ReadWriteOncePod PVC.
		pod1, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod1")
		_, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(context.TODO(), pod1, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod1")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod1 running status")
		l.pods = append(l.pods, pod1)

		// Create the second pod, which will fail scheduling because the ReadWriteOncePod PVC is already in use.
		pod2, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod2")
		_, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(context.TODO(), pod2, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod2")
		err = e2epod.WaitForPodNameUnschedulableInNamespace(l.cs, pod2.Name, pod2.Namespace)
		framework.ExpectNoError(err, "failed to wait for pod2 unschedulable status")
		l.pods = append(l.pods, pod2)

		// Delete the first pod and observe the second pod can now start.
		err = e2epod.DeletePodWithWait(l.cs, pod1)
		framework.ExpectNoError(err, "failed to delete pod1")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod2 running status")
	})

	ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume on the same node", func() {
		// Create the ReadWriteOncePod PVC.
		accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}
		l.volume = storageframework.CreateVolumeResourceWithAccessModes(driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes)

		podConfig := e2epod.Config{
			NS:           f.Namespace.Name,
			PVCs:         []*v1.PersistentVolumeClaim{l.volume.Pvc},
			SeLinuxLabel: e2epv.SELinuxLabel,
		}

		// Create the first pod, which will take ownership of the ReadWriteOncePod PVC.
		pod1, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod1")
		_, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(context.TODO(), pod1, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod1")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod1 running status")
		l.pods = append(l.pods, pod1)

		// Get the node name for the first pod now that it's running.
		pod1, err = l.cs.CoreV1().Pods(pod1.Namespace).Get(context.TODO(), pod1.Name, metav1.GetOptions{})
		framework.ExpectNoError(err, "failed to get pod1")
		nodeName := pod1.Spec.NodeName

		// Create the second pod on the same node as the first pod.
		pod2, err := e2epod.MakeSecPod(&podConfig)
		framework.ExpectNoError(err, "failed to create spec for pod2")
		// Set the node name to that of the first pod.
		// Node name is set to bypass scheduling, which would enforce the access mode otherwise.
		pod2.Spec.NodeName = nodeName
		_, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(context.TODO(), pod2, metav1.CreateOptions{})
		framework.ExpectNoError(err, "failed to create pod2")
		l.pods = append(l.pods, pod2)

		// Wait for the FailedMount event to be generated for the second pod.
		eventSelector := fields.Set{
			"involvedObject.kind":      "Pod",
			"involvedObject.name":      pod2.Name,
			"involvedObject.namespace": pod2.Namespace,
			"reason":                   events.FailedMountVolume,
		}.AsSelector().String()
		msg := "volume uses the ReadWriteOncePod access mode and is already in use by another pod"
		err = e2eevents.WaitTimeoutForEvent(l.cs, pod2.Namespace, eventSelector, msg, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for FailedMount event for pod2")

		// Wait for the second pod to fail because it is stuck at container creating.
		reason := "ContainerCreating"
		err = e2epod.WaitForPodContainerToFail(l.cs, pod2.Namespace, pod2.Name, 0, reason, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod2 container to fail")

		// Delete the first pod and observe the second pod can now start.
		err = e2epod.DeletePodWithWait(l.cs, pod1)
		framework.ExpectNoError(err, "failed to delete pod1")
		err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "failed to wait for pod2 running status")
	})
}


@@ -218,7 +218,7 @@ spec:
       serviceAccountName: csi-hostpathplugin-sa
       containers:
         - name: hostpath
-          image: registry.k8s.io/sig-storage/hostpathplugin:v1.7.3
+          image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
           args:
             - "--drivername=hostpath.csi.k8s.io"
             - "--v=5"