Merge pull request #114494 from chrishenzie/readwriteoncepod-beta

Graduate ReadWriteOncePod to beta and update the e2e test
Kubernetes Prow Robot authored 2023-02-14 16:35:42 -08:00; committed by GitHub
commit 390ddafe9e
3 changed files with 43 additions and 13 deletions


@@ -652,7 +652,9 @@ const (
QOSReserved featuregate.Feature = "QOSReserved"
// owner: @chrishenzie
// kep: https://kep.k8s.io/2485
// alpha: v1.22
+ // beta: v1.27
//
// Enables usage of the ReadWriteOncePod PersistentVolume access mode.
ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod"
@@ -1009,7 +1011,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
- ReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},
+ ReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta},
RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha},
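
For orientation, the gate above is consulted through the standard k8s.io/apiserver feature-gate plumbing. A minimal standalone sketch of that pattern, redeclaring the gate name locally so it compiles on its own; this is not code from the PR (in-tree callers use features.ReadWriteOncePod from pkg/features):

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-base/featuregate"
)

// Redeclared locally so the sketch is self-contained.
const ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod"

func main() {
	// Register the gate with its new beta default (Default: true), matching the diff above.
	_ = utilfeature.DefaultMutableFeatureGate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		ReadWriteOncePod: {Default: true, PreRelease: featuregate.Beta},
	})

	// Callers guard ReadWriteOncePod-specific behavior behind the gate.
	if utilfeature.DefaultFeatureGate.Enabled(ReadWriteOncePod) {
		fmt.Println("ReadWriteOncePod enforcement enabled")
	}
}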


@@ -22,9 +22,11 @@ import (
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
+ schedulingv1 "k8s.io/api/scheduling/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
errors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
@@ -45,9 +47,10 @@ var _ storageframework.TestSuite = &readWriteOncePodTestSuite{}
type readWriteOncePodTest struct {
config *storageframework.PerTestConfig
-	cs     clientset.Interface
-	volume *storageframework.VolumeResource
-	pods   []*v1.Pod
+	cs            clientset.Interface
+	volume        *storageframework.VolumeResource
+	pods          []*v1.Pod
+	priorityClass *schedulingv1.PriorityClass
migrationCheck *migrationOpCheck
}
@@ -57,7 +60,6 @@ func InitCustomReadWriteOncePodTestSuite(patterns []storageframework.TestPattern
tsInfo: storageframework.TestSuiteInfo{
Name: "read-write-once-pod",
TestPatterns: patterns,
- FeatureTag: "[Feature:ReadWriteOncePod]",
},
}
}
@@ -112,6 +114,12 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
err := l.volume.CleanupResource(ctx)
errs = append(errs, err)
+ if l.priorityClass != nil {
+ 	framework.Logf("Deleting PriorityClass %v", l.priorityClass.Name)
+ 	err := l.cs.SchedulingV1().PriorityClasses().Delete(ctx, l.priorityClass.Name, metav1.DeleteOptions{})
+ 	errs = append(errs, err)
+ }
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
l.migrationCheck.validateMigrationVolumeOpCounts(ctx)
}
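
The cleanup above runs every deletion and reports all failures at once through errors.NewAggregate rather than stopping at the first error. A minimal standalone sketch of that pattern; deleteVolume and deletePriorityClass are hypothetical stand-ins for the real cleanup calls:

package main

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// Hypothetical stand-ins for the real cleanup calls; each returns nil on success.
func deleteVolume() error        { return nil }
func deletePriorityClass() error { return fmt.Errorf("priorityclass not found") }

func main() {
	// Run every cleanup step, collecting failures instead of bailing out early.
	var errs []error
	errs = append(errs, deleteVolume())
	errs = append(errs, deletePriorityClass())

	// NewAggregate drops nil entries and returns nil when nothing failed,
	// so a single check surfaces every cleanup problem at once.
	if err := utilerrors.NewAggregate(errs); err != nil {
		fmt.Println("while cleaning up resource:", err)
	}
}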
@@ -121,11 +129,18 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
ginkgo.DeferCleanup(cleanup)
})
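
ginkgo.DeferCleanup above registers the cleanup function to run once the spec finishes, pass or fail, which is why the test bodies never call cleanup directly. A minimal ginkgo/v2 sketch of the same shape (package and spec names are illustrative, not from this file; it would live in a _test.go file):

package sketch_test

import (
	"context"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestSketch(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "cleanup sketch")
}

var _ = ginkgo.Describe("deferred cleanup", func() {
	ginkgo.BeforeEach(func(ctx context.Context) {
		// ... create volumes, pods, priority classes ...
		cleanup := func(ctx context.Context) {
			// ... delete them, aggregating errors as in the suite above ...
		}
		// Runs after the spec completes, whether it passed or failed.
		ginkgo.DeferCleanup(cleanup)
	})

	ginkgo.It("runs with cleanup registered", func() {})
})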
ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume", func(ctx context.Context) {
ginkgo.It("should preempt lower priority pods using ReadWriteOncePod volumes", func(ctx context.Context) {
// Create the ReadWriteOncePod PVC.
accessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}
l.volume = storageframework.CreateVolumeResourceWithAccessModes(ctx, driver, l.config, pattern, t.GetTestSuiteInfo().SupportedSizeRange, accessModes)
+ l.priorityClass = &schedulingv1.PriorityClass{
+ 	ObjectMeta: metav1.ObjectMeta{Name: "e2e-test-read-write-once-pod-" + string(uuid.NewUUID())},
+ 	Value:      int32(1000),
+ }
+ _, err := l.cs.SchedulingV1().PriorityClasses().Create(ctx, l.priorityClass, metav1.CreateOptions{})
+ framework.ExpectNoError(err, "failed to create priority class")
podConfig := e2epod.Config{
NS: f.Namespace.Name,
PVCs: []*v1.PersistentVolumeClaim{l.volume.Pvc},
@@ -141,20 +156,33 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) {
framework.ExpectNoError(err, "failed to wait for pod1 running status")
l.pods = append(l.pods, pod1)
- // Create the second pod, which will fail scheduling because the ReadWriteOncePod PVC is already in use.
+ // Create the second pod, which will preempt the first pod because it's using the
+ // ReadWriteOncePod PVC and has higher priority.
pod2, err := e2epod.MakeSecPod(&podConfig)
framework.ExpectNoError(err, "failed to create spec for pod2")
+ pod2.Spec.PriorityClassName = l.priorityClass.Name
_, err = l.cs.CoreV1().Pods(pod2.Namespace).Create(ctx, pod2, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create pod2")
- err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, l.cs, pod2.Name, pod2.Namespace)
- framework.ExpectNoError(err, "failed to wait for pod2 unschedulable status")
l.pods = append(l.pods, pod2)
- // Delete the first pod and observe the second pod can now start.
- err = e2epod.DeletePodWithWait(ctx, l.cs, pod1)
- framework.ExpectNoError(err, "failed to delete pod1")
+ // Wait for the first pod to be preempted and the second pod to start.
+ err = e2epod.WaitForPodNotFoundInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
+ framework.ExpectNoError(err, "failed to wait for pod1 to be preempted")
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod2.Name, pod2.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "failed to wait for pod2 running status")
+ // Recreate the first pod, which will fail to schedule because the second pod
+ // is using the ReadWriteOncePod PVC and has higher priority.
+ _, err = l.cs.CoreV1().Pods(pod1.Namespace).Create(ctx, pod1, metav1.CreateOptions{})
+ framework.ExpectNoError(err, "failed to create pod1")
+ err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace)
+ framework.ExpectNoError(err, "failed to wait for pod1 unschedulable status")
+ // Delete the second pod with higher priority and observe the first pod can now start.
+ err = e2epod.DeletePodWithWait(ctx, l.cs, pod2)
+ framework.ExpectNoError(err, "failed to delete pod2")
+ err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, l.cs, pod1.Name, pod1.Namespace, f.Timeouts.PodStart)
+ framework.ExpectNoError(err, "failed to wait for pod1 running status")
})
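
The preemption flow above turns on two API objects: a PVC whose only access mode is ReadWriteOncePod, and a PriorityClass that lets the second pod outrank the first. A minimal sketch of those objects in plain client-go types; the names, size, and pause image are illustrative, not taken from the PR:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	schedulingv1 "k8s.io/api/scheduling/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func objects() (*v1.PersistentVolumeClaim, *schedulingv1.PriorityClass, *v1.Pod) {
	// A claim only a single pod may use at a time, cluster-wide.
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "rwop-pvc"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
			},
		},
	}

	// Pods referencing this class outrank default-priority pods, so the
	// scheduler may preempt a lower-priority pod that holds the claim.
	pc := &schedulingv1.PriorityClass{
		ObjectMeta: metav1.ObjectMeta{Name: "rwop-high"},
		Value:      1000,
	}

	// The higher-priority pod mounts the claim and names the class.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "pod2"},
		Spec: v1.PodSpec{
			PriorityClassName: pc.Name,
			Containers: []v1.Container{{
				Name:         "app",
				Image:        "registry.k8s.io/pause:3.9",
				VolumeMounts: []v1.VolumeMount{{Name: "data", MountPath: "/data"}},
			}},
			Volumes: []v1.Volume{{
				Name: "data",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
				},
			}},
		},
	}
	return pvc, pc, pod
}

func main() {
	pvc, pc, pod := objects()
	fmt.Println(pvc.Name, pc.Name, pod.Name)
}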
ginkgo.It("should block a second pod from using an in-use ReadWriteOncePod volume on the same node", func(ctx context.Context) {


@@ -218,7 +218,7 @@ spec:
serviceAccountName: csi-hostpathplugin-sa
containers:
- name: hostpath
- image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0
+ image: registry.k8s.io/sig-storage/hostpathplugin:v1.11.0
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"