diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go index 94918f3c5a7..fcfa8dfca2b 100644 --- a/pkg/scheduler/testing/wrappers.go +++ b/pkg/scheduler/testing/wrappers.go @@ -22,6 +22,7 @@ import ( v1 "k8s.io/api/core/v1" resourceapi "k8s.io/api/resource/v1alpha3" + storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -1131,3 +1132,34 @@ func (wrapper *ResourceSliceWrapper) Device(name string, attrs map[resourceapi.Q wrapper.Spec.Devices = append(wrapper.Spec.Devices, resourceapi.Device{Name: name, Basic: &resourceapi.BasicDevice{Attributes: attrs}}) return wrapper } + +// StorageClassWrapper wraps a StorageClass inside. +type StorageClassWrapper struct{ storagev1.StorageClass } + +// MakeStorageClass creates a StorageClass wrapper. +func MakeStorageClass() *StorageClassWrapper { + return &StorageClassWrapper{} +} + +// Obj returns the inner StorageClass. +func (s *StorageClassWrapper) Obj() *storagev1.StorageClass { + return &s.StorageClass +} + +// Name sets `n` as the name of the inner StorageClass. +func (s *StorageClassWrapper) Name(n string) *StorageClassWrapper { + s.SetName(n) + return s +} + +// VolumeBindingMode sets mode as the mode of the inner StorageClass. +func (s *StorageClassWrapper) VolumeBindingMode(mode storagev1.VolumeBindingMode) *StorageClassWrapper { + s.StorageClass.VolumeBindingMode = &mode + return s +} + +// Provisioner sets p as the provisioner of the inner StorageClass.
+func (s *StorageClassWrapper) Provisioner(p string) *StorageClassWrapper { + s.StorageClass.Provisioner = p + return s +} diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go index b91499a4e6e..98c05e03a6a 100644 --- a/test/integration/scheduler/queue_test.go +++ b/test/integration/scheduler/queue_test.go @@ -23,6 +23,7 @@ import ( "time" v1 "k8s.io/api/core/v1" + storagev1 "k8s.io/api/storage/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" "k8s.io/apimachinery/pkg/api/errors" @@ -800,7 +801,7 @@ func TestCoreResourceEnqueue(t *testing.T) { HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). Obj() if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Create(testCtx.Ctx, pv2, metav1.CreateOptions{}); err != nil { - return fmt.Errorf("failed to update pod1: %w", err) + return fmt.Errorf("failed to create pv2: %w", err) } return nil }, @@ -855,13 +856,221 @@ func TestCoreResourceEnqueue(t *testing.T) { HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). Obj() if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv2, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("failed to update pod1: %w", err) + return fmt.Errorf("failed to update pv2: %w", err) } return nil }, wantRequeuedPods: sets.New("pod2"), enableSchedulingQueueHint: []bool{true}, }, + { + name: "Pod rejected with node by the VolumeZone plugin is requeued when the PVC is added", + initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()}, + initialPVs: []*v1.PersistentVolume{ + st.MakePersistentVolume(). + Name("pv1"). + Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). 
+ Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj(), + st.MakePersistentVolume(). + Name("pv2"). + Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). + Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj(), + }, + initialPVCs: []*v1.PersistentVolumeClaim{ + st.MakePersistentVolumeClaim(). + Name("pvc1"). + Annotation(volume.AnnBindCompleted, "true"). + VolumeName("pv1"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). + Obj(), + }, + initialPods: []*v1.Pod{ + st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(), + }, + pods: []*v1.Pod{ + st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(), + }, + triggerFn: func(testCtx *testutils.TestContext) error { + pvc2 := st.MakePersistentVolumeClaim(). + Name("pvc2"). + Annotation(volume.AnnBindCompleted, "true"). + VolumeName("pv2"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). 
+ Obj() + if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc2, metav1.CreateOptions{}); err != nil { + return fmt.Errorf("failed to add pvc2: %w", err) + } + return nil + }, + wantRequeuedPods: sets.New("pod2"), + enableSchedulingQueueHint: []bool{true}, + }, + { + name: "Pod rejected with node by the VolumeZone plugin is requeued when the PVC is updated", + initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()}, + initialPVs: []*v1.PersistentVolume{ + st.MakePersistentVolume(). + Name("pv1"). + Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). + Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj(), + st.MakePersistentVolume(). + Name("pv2"). + Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). + Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj(), + }, + initialPVCs: []*v1.PersistentVolumeClaim{ + st.MakePersistentVolumeClaim(). + Name("pvc1"). + Annotation(volume.AnnBindCompleted, "true"). + VolumeName("pv1"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). + Obj(), + st.MakePersistentVolumeClaim(). + Name("pvc2"). + Annotation(volume.AnnBindCompleted, "true"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). 
+ Obj(), + }, + initialPods: []*v1.Pod{ + st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(), + }, + pods: []*v1.Pod{ + st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(), + }, + triggerFn: func(testCtx *testutils.TestContext) error { + pvc2 := st.MakePersistentVolumeClaim(). + Name("pvc2"). + Annotation(volume.AnnBindCompleted, "true"). + VolumeName("pv2"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). + Obj() + if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Update(testCtx.Ctx, pvc2, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("failed to update pvc2: %w", err) + } + return nil + }, + wantRequeuedPods: sets.New("pod2"), + enableSchedulingQueueHint: []bool{true}, + }, + { + name: "Pod rejected with node by the VolumeZone plugin is requeued when the Storage class is added", + initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()}, + initialPVs: []*v1.PersistentVolume{ + st.MakePersistentVolume(). + Name("pv1"). + Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). + Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj(), + }, + initialPVCs: []*v1.PersistentVolumeClaim{ + st.MakePersistentVolumeClaim(). + Name("pvc1"). + Annotation(volume.AnnBindCompleted, "true"). + VolumeName("pv1"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). + Obj(), + st.MakePersistentVolumeClaim(). + Name("pvc2"). + Annotation(volume.AnnBindCompleted, "true"). 
+ VolumeName("pv2"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). + Obj(), + }, + initialPods: []*v1.Pod{ + st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(), + }, + pods: []*v1.Pod{ + st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(), + }, + triggerFn: func(testCtx *testutils.TestContext) error { + sc1 := st.MakeStorageClass(). + Name("sc1"). + VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer). + Provisioner("p"). + Obj() + if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Create(testCtx.Ctx, sc1, metav1.CreateOptions{}); err != nil { + return fmt.Errorf("failed to create sc1: %w", err) + } + return nil + }, + wantRequeuedPods: sets.New("pod2"), + enableSchedulingQueueHint: []bool{true}, + }, + { + name: "Pod rejected with node by the VolumeZone plugin is not requeued when the unrelated label of PV is updated", + initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west1-a").Obj()}, + initialPVs: []*v1.PersistentVolume{ + st.MakePersistentVolume(). + Name("pv1"). + Labels(map[string]string{v1.LabelTopologyZone: "us-west1-a"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). + Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj(), + st.MakePersistentVolume(). + Name("pv2"). + Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). + Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj(), + }, + initialPVCs: []*v1.PersistentVolumeClaim{ + st.MakePersistentVolumeClaim(). + Name("pvc1"). 
+ Annotation(volume.AnnBindCompleted, "true"). + VolumeName("pv1"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). + Obj(), + st.MakePersistentVolumeClaim(). + Name("pvc2"). + Annotation(volume.AnnBindCompleted, "true"). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod}). + Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}). + Obj(), + }, + initialPods: []*v1.Pod{ + st.MakePod().Name("pod1").Container("image").PVC("pvc1").Node("fake-node").Obj(), + }, + pods: []*v1.Pod{ + st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(), + }, + triggerFn: func(testCtx *testutils.TestContext) error { + pv2 := st.MakePersistentVolume().Name("pv2").Labels(map[string]string{v1.LabelTopologyZone: "us-east1"}). + AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}). + Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}). + HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: &volType}). + Obj() + if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv2, metav1.UpdateOptions{}); err != nil { + return fmt.Errorf("failed to update pv2: %w", err) + } + return nil + }, + wantRequeuedPods: sets.Set[string]{}, + enableSchedulingQueueHint: []bool{true}, + }, } for _, tt := range tests {