diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go index 33f6e15e1be..3114f1c52aa 100644 --- a/pkg/scheduler/testing/wrappers.go +++ b/pkg/scheduler/testing/wrappers.go @@ -581,10 +581,7 @@ func (p *PodWrapper) Labels(labels map[string]string) *PodWrapper { // Annotation sets a {k,v} pair to the inner pod annotation. func (p *PodWrapper) Annotation(key, value string) *PodWrapper { - if p.ObjectMeta.Annotations == nil { - p.ObjectMeta.Annotations = make(map[string]string) - } - p.ObjectMeta.Annotations[key] = value + metav1.SetMetaDataAnnotation(&p.ObjectMeta, key, value) return p } @@ -695,3 +692,94 @@ func (n *NodeWrapper) Taints(taints []v1.Taint) *NodeWrapper { n.Spec.Taints = taints return n } + +// PersistentVolumeClaimWrapper wraps a PersistentVolumeClaim inside. +type PersistentVolumeClaimWrapper struct{ v1.PersistentVolumeClaim } + +// MakePersistentVolumeClaim creates a PersistentVolumeClaim wrapper. +func MakePersistentVolumeClaim() *PersistentVolumeClaimWrapper { + return &PersistentVolumeClaimWrapper{} +} + +// Obj returns the inner PersistentVolumeClaim. +func (p *PersistentVolumeClaimWrapper) Obj() *v1.PersistentVolumeClaim { + return &p.PersistentVolumeClaim +} + +// Name sets `s` as the name of the inner PersistentVolumeClaim. +func (p *PersistentVolumeClaimWrapper) Name(s string) *PersistentVolumeClaimWrapper { + p.SetName(s) + return p +} + +// Namespace sets `s` as the namespace of the inner PersistentVolumeClaim. +func (p *PersistentVolumeClaimWrapper) Namespace(s string) *PersistentVolumeClaimWrapper { + p.SetNamespace(s) + return p +} + +// Annotation sets a {k,v} pair to the inner PersistentVolumeClaim. +func (p *PersistentVolumeClaimWrapper) Annotation(key, value string) *PersistentVolumeClaimWrapper { + metav1.SetMetaDataAnnotation(&p.ObjectMeta, key, value) + return p +} + +// VolumeName sets `name` as the volume name of the inner +// PersistentVolumeClaim. +func (p *PersistentVolumeClaimWrapper) VolumeName(name string) *PersistentVolumeClaimWrapper { + p.PersistentVolumeClaim.Spec.VolumeName = name + return p +} + +// AccessModes sets `accessModes` as the access modes of the inner +// PersistentVolumeClaim. +func (p *PersistentVolumeClaimWrapper) AccessModes(accessModes []v1.PersistentVolumeAccessMode) *PersistentVolumeClaimWrapper { + p.PersistentVolumeClaim.Spec.AccessModes = accessModes + return p +} + +// Resources sets `resources` as the resource requirements of the inner +// PersistentVolumeClaim. +func (p *PersistentVolumeClaimWrapper) Resources(resources v1.ResourceRequirements) *PersistentVolumeClaimWrapper { + p.PersistentVolumeClaim.Spec.Resources = resources + return p +} + +// PersistentVolumeWrapper wraps a PersistentVolume inside. +type PersistentVolumeWrapper struct{ v1.PersistentVolume } + +// MakePersistentVolume creates a PersistentVolume wrapper. +func MakePersistentVolume() *PersistentVolumeWrapper { + return &PersistentVolumeWrapper{} +} + +// Obj returns the inner PersistentVolume. +func (p *PersistentVolumeWrapper) Obj() *v1.PersistentVolume { + return &p.PersistentVolume +} + +// Name sets `s` as the name of the inner PersistentVolume. +func (p *PersistentVolumeWrapper) Name(s string) *PersistentVolumeWrapper { + p.SetName(s) + return p +} + +// AccessModes sets `accessModes` as the access modes of the inner +// PersistentVolume. 
+func (p *PersistentVolumeWrapper) AccessModes(accessModes []v1.PersistentVolumeAccessMode) *PersistentVolumeWrapper { + p.PersistentVolume.Spec.AccessModes = accessModes + return p +} + +// Capacity sets `capacity` as the resource list of the inner PersistentVolume. +func (p *PersistentVolumeWrapper) Capacity(capacity v1.ResourceList) *PersistentVolumeWrapper { + p.PersistentVolume.Spec.Capacity = capacity + return p +} + +// HostPathVolumeSource sets `src` as the host path volume source of the inner +// PersistentVolume. +func (p *PersistentVolumeWrapper) HostPathVolumeSource(src *v1.HostPathVolumeSource) *PersistentVolumeWrapper { + p.PersistentVolume.Spec.HostPath = src + return p +} diff --git a/test/integration/scheduler/filters/filters_test.go b/test/integration/scheduler/filters/filters_test.go index 8fe07407fc7..a2f5aee00c3 100644 --- a/test/integration/scheduler/filters/filters_test.go +++ b/test/integration/scheduler/filters/filters_test.go @@ -24,11 +24,13 @@ import ( v1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/kubernetes" featuregatetesting "k8s.io/component-base/featuregate/testing" + "k8s.io/component-helpers/storage/volume" "k8s.io/kubernetes/pkg/features" st "k8s.io/kubernetes/pkg/scheduler/testing" testutils "k8s.io/kubernetes/test/integration/util" @@ -1540,10 +1542,11 @@ var ( func TestUnschedulablePodBecomesSchedulable(t *testing.T) { tests := []struct { - name string - init func(kubernetes.Interface, string) error - pod *testutils.PausePodConfig - update func(kubernetes.Interface, string) error + name string + init func(kubernetes.Interface, string) error + pod *testutils.PausePodConfig + update func(kubernetes.Interface, string) error + enableReadWriteOncePod bool }{ { name: "node gets added", @@ -1687,9 +1690,76 @@ func TestUnschedulablePodBecomesSchedulable(t *testing.T) { return nil }, }, + { + name: "scheduled pod uses read-write-once-pod pvc", + init: func(cs kubernetes.Interface, ns string) error { + _, err := createNode(cs, st.MakeNode().Name("node").Obj()) + if err != nil { + return fmt.Errorf("cannot create node: %v", err) + } + + storage := v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}} + volType := v1.HostPathDirectoryOrCreate + pv, err := testutils.CreatePV(cs, st.MakePersistentVolume(). + Name("pv-with-read-write-once-pod"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Capacity(storage.Requests). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/mnt", Type: &volType}). + Obj()) + if err != nil { + return fmt.Errorf("cannot create pv: %v", err) + } + pvc, err := testutils.CreatePVC(cs, st.MakePersistentVolumeClaim(). + Name("pvc-with-read-write-once-pod"). + Namespace(ns). + // Annotation and volume name required for PVC to be considered bound. + Annotation(volume.AnnBindCompleted, "true"). + VolumeName(pv.Name). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(storage). 
+ Obj()) + if err != nil { + return fmt.Errorf("cannot create pvc: %v", err) + } + + pod := initPausePod(&testutils.PausePodConfig{ + Name: "pod-to-be-deleted", + Namespace: ns, + Volumes: []v1.Volume{{ + Name: "volume", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: pvc.Name, + }, + }, + }}, + }) + if _, err := createPausePod(cs, pod); err != nil { + return fmt.Errorf("cannot create pod: %v", err) + } + return nil + }, + pod: &testutils.PausePodConfig{ + Name: "pod-to-take-over-pvc", + Volumes: []v1.Volume{{ + Name: "volume", + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-with-read-write-once-pod", + }, + }, + }}, + }, + update: func(cs kubernetes.Interface, ns string) error { + return deletePod(cs, "pod-to-be-deleted", ns) + }, + enableReadWriteOncePod: true, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tt.enableReadWriteOncePod)() + testCtx := initTest(t, "scheduler-informer") defer testutils.CleanupTest(t, testCtx) diff --git a/test/integration/util/util.go b/test/integration/util/util.go index b88a856a6be..13335b3b448 100644 --- a/test/integration/util/util.go +++ b/test/integration/util/util.go @@ -610,6 +610,7 @@ type PausePodConfig struct { Priority *int32 PreemptionPolicy *v1.PreemptionPolicy PriorityClassName string + Volumes []v1.Volume } // InitPausePod initializes a pod API object from the given config. It is used @@ -637,6 +638,7 @@ func InitPausePod(conf *PausePodConfig) *v1.Pod { Priority: conf.Priority, PreemptionPolicy: conf.PreemptionPolicy, PriorityClassName: conf.PriorityClassName, + Volumes: conf.Volumes, }, } if conf.Resources != nil { @@ -674,6 +676,18 @@ func CreatePausePodWithResource(cs clientset.Interface, podName string, return CreatePausePod(cs, InitPausePod(&conf)) } +// CreatePVC creates a PersistentVolumeClaim with the given config and returns +// its pointer and error status. +func CreatePVC(cs clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { + return cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) +} + +// CreatePV creates a PersistentVolume with the given config and returns its +// pointer and error status. +func CreatePV(cs clientset.Interface, pv *v1.PersistentVolume) (*v1.PersistentVolume, error) { + return cs.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) +} + // RunPausePod creates a pod with "Pause" image and the given config and waits // until it is scheduled. It returns its pointer and error status. func RunPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
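
Usage sketch (not part of the diff): the snippet below shows how the new wrappers and helpers might be combined in an integration test, mirroring the `init` func of the read-write-once-pod test case above. The package name, the function name `createBoundRWOPClaim`, and the object names `example-pv`/`example-pvc` are illustrative placeholders; the clientset `cs` and namespace `ns` are assumed to come from an existing test context.

```go
package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/client-go/kubernetes"
	"k8s.io/component-helpers/storage/volume"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	testutils "k8s.io/kubernetes/test/integration/util"
)

// createBoundRWOPClaim provisions a HostPath PersistentVolume and a pre-bound
// ReadWriteOncePod PersistentVolumeClaim using the wrappers and helpers added
// in this patch. The function and object names are illustrative only.
func createBoundRWOPClaim(cs kubernetes.Interface, ns string) (*v1.PersistentVolumeClaim, error) {
	storage := v1.ResourceRequirements{
		Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")},
	}
	volType := v1.HostPathDirectoryOrCreate

	pv, err := testutils.CreatePV(cs, st.MakePersistentVolume().
		Name("example-pv").
		AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
		Capacity(storage.Requests).
		HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/mnt", Type: &volType}).
		Obj())
	if err != nil {
		return nil, fmt.Errorf("cannot create pv: %v", err)
	}

	// The bind-completed annotation plus Spec.VolumeName make the claim look
	// already bound to the PV, so no volume binding controller is needed.
	return testutils.CreatePVC(cs, st.MakePersistentVolumeClaim().
		Name("example-pvc").
		Namespace(ns).
		Annotation(volume.AnnBindCompleted, "true").
		VolumeName(pv.Name).
		AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}).
		Resources(storage).
		Obj())
}
```

Setting `volume.AnnBindCompleted` together with `Spec.VolumeName` is what lets the scheduler treat the claim as bound without running the volume binding controller, which is why the test case's `init` func in the diff above does the same.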