Merge pull request #127946 from saku3/add-test-volumebinding-for-queueinghint

add integration test for volumebinding for queueinghint
Commit 272bb954b3 by Kubernetes Prow Robot, 2024-11-07 07:41:29 +00:00, committed by GitHub (GPG key ID: B5690EEEBB952194).
2 changed files with 835 additions and 0 deletions
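The tests added here exercise the VolumeBinding plugin's QueueingHint callbacks: each case leaves a Pod unschedulable, mutates a storage-related object (Node, PV, PVC, StorageClass, CSINode, CSIDriver, or CSIStorageCapacity), and checks whether the scheduling queue requeues the Pod. The actual hint implementations live in the scheduler's volumebinding plugin and are not part of this diff; the standalone sketch below (hypothetical names, simplified types, my own assumptions) only illustrates the kind of decision a hint makes for one of the cases in the test table, a StorageClass update that changes AllowedTopologies.

// Illustrative only, not code from this PR: a self-contained sketch of the
// decision a queueing hint makes for a StorageClass update. The names hint,
// queue, queueSkip, and storageClassUpdateHint are hypothetical.
package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
)

type hint string

const (
	queue     hint = "Queue"     // requeue Pods previously rejected by the plugin
	queueSkip hint = "QueueSkip" // the event cannot help; keep them unschedulable
)

// storageClassUpdateHint requeues only when the update could change the
// scheduling result, i.e. when AllowedTopologies actually differs.
func storageClassUpdateHint(oldSC, newSC *storagev1.StorageClass) hint {
	if !reflect.DeepEqual(oldSC.AllowedTopologies, newSC.AllowedTopologies) {
		return queue
	}
	return queueSkip
}

func main() {
	oldSC := &storagev1.StorageClass{
		AllowedTopologies: []v1.TopologySelectorTerm{{
			MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
				{Key: v1.LabelTopologyZone, Values: []string{"us-west-1c"}},
			},
		}},
	}

	// Label-only change: AllowedTopologies is identical, so skip.
	same := oldSC.DeepCopy()
	fmt.Println(storageClassUpdateHint(oldSC, same)) // QueueSkip

	// AllowedTopologies now matches the node's zone, so requeue.
	changed := oldSC.DeepCopy()
	changed.AllowedTopologies[0].MatchLabelExpressions[0].Values = []string{"us-west-1a"}
	fmt.Println(storageClassUpdateHint(oldSC, changed)) // Queue
}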

@@ -939,6 +939,13 @@ func (p *PersistentVolumeClaimWrapper) Resources(resources v1.VolumeResourceRequ
return p
}
// StorageClassName sets `name` as the StorageClassName of the inner
// PersistentVolumeClaim.
func (p *PersistentVolumeClaimWrapper) StorageClassName(name *string) *PersistentVolumeClaimWrapper {
p.PersistentVolumeClaim.Spec.StorageClassName = name
return p
}
// PersistentVolumeWrapper wraps a PersistentVolume inside.
type PersistentVolumeWrapper struct{ v1.PersistentVolume }
@@ -1009,6 +1016,12 @@ func (p *PersistentVolumeWrapper) Label(k, v string) *PersistentVolumeWrapper {
return p
}
// StorageClassName sets `name` as the StorageClassName of the inner PersistentVolume.
func (p *PersistentVolumeWrapper) StorageClassName(name string) *PersistentVolumeWrapper {
p.PersistentVolume.Spec.StorageClassName = name
return p
}
// ResourceClaimWrapper wraps a ResourceClaim inside.
type ResourceClaimWrapper struct{ resourceapi.ResourceClaim }
@@ -1165,3 +1178,105 @@ func (s *StorageClassWrapper) Provisioner(p string) *StorageClassWrapper {
s.StorageClass.Provisioner = p
return s
}
// AllowedTopologies sets `AllowedTopologies` of the inner StorageClass.
func (s *StorageClassWrapper) AllowedTopologies(topologies []v1.TopologySelectorTerm) *StorageClassWrapper {
s.StorageClass.AllowedTopologies = topologies
return s
}
// Label sets a {k,v} pair to the inner StorageClass label.
func (s *StorageClassWrapper) Label(k, v string) *StorageClassWrapper {
if s.ObjectMeta.Labels == nil {
s.ObjectMeta.Labels = make(map[string]string)
}
s.ObjectMeta.Labels[k] = v
return s
}
// CSINodeWrapper wraps a CSINode inside.
type CSINodeWrapper struct{ storagev1.CSINode }
// MakeCSINode creates a CSINode wrapper.
func MakeCSINode() *CSINodeWrapper {
return &CSINodeWrapper{}
}
// Obj returns the inner CSINode.
func (c *CSINodeWrapper) Obj() *storagev1.CSINode {
return &c.CSINode
}
// Name sets `n` as the name of the inner CSINode.
func (c *CSINodeWrapper) Name(n string) *CSINodeWrapper {
c.SetName(n)
return c
}
// Annotation sets a {k,v} pair to the inner CSINode annotation.
func (c *CSINodeWrapper) Annotation(key, value string) *CSINodeWrapper {
metav1.SetMetaDataAnnotation(&c.ObjectMeta, key, value)
return c
}
// Driver adds a driver to the inner CSINode.
func (c *CSINodeWrapper) Driver(driver storagev1.CSINodeDriver) *CSINodeWrapper {
c.Spec.Drivers = append(c.Spec.Drivers, driver)
return c
}
// CSIDriverWrapper wraps a CSIDriver inside.
type CSIDriverWrapper struct{ storagev1.CSIDriver }
// MakeCSIDriver creates a CSIDriver wrapper.
func MakeCSIDriver() *CSIDriverWrapper {
return &CSIDriverWrapper{}
}
// Obj returns the inner CSIDriver.
func (c *CSIDriverWrapper) Obj() *storagev1.CSIDriver {
return &c.CSIDriver
}
// Name sets `n` as the name of the inner CSIDriver.
func (c *CSIDriverWrapper) Name(n string) *CSIDriverWrapper {
c.SetName(n)
return c
}
// StorageCapacity sets the `StorageCapacity` of the inner CSIDriver.
func (c *CSIDriverWrapper) StorageCapacity(storageCapacity *bool) *CSIDriverWrapper {
c.Spec.StorageCapacity = storageCapacity
return c
}
// CSIStorageCapacityWrapper wraps a CSIStorageCapacity inside.
type CSIStorageCapacityWrapper struct{ storagev1.CSIStorageCapacity }
// MakeCSIStorageCapacity creates a CSIStorageCapacity wrapper.
func MakeCSIStorageCapacity() *CSIStorageCapacityWrapper {
return &CSIStorageCapacityWrapper{}
}
// Obj returns the inner CSIStorageCapacity.
func (c *CSIStorageCapacityWrapper) Obj() *storagev1.CSIStorageCapacity {
return &c.CSIStorageCapacity
}
// Name sets `n` as the name of the inner CSIStorageCapacity.
func (c *CSIStorageCapacityWrapper) Name(n string) *CSIStorageCapacityWrapper {
c.SetName(n)
return c
}
// StorageClassName sets the `StorageClassName` of the inner CSIStorageCapacity.
func (c *CSIStorageCapacityWrapper) StorageClassName(name string) *CSIStorageCapacityWrapper {
c.CSIStorageCapacity.StorageClassName = name
return c
}
// Capacity sets the `Capacity` of the inner CSIStorageCapacity.
func (c *CSIStorageCapacityWrapper) Capacity(capacity *resource.Quantity) *CSIStorageCapacityWrapper {
c.CSIStorageCapacity.Capacity = capacity
return c
}

@@ -212,6 +212,14 @@ func TestCoreResourceEnqueue(t *testing.T) {
initialPVCs []*v1.PersistentVolumeClaim
// initialPVs are the list of PersistentVolume to be created at first.
initialPVs []*v1.PersistentVolume
// initialStorageClasses are the list of StorageClass to be created at first.
initialStorageClasses []*storagev1.StorageClass
// initialCSINodes are the list of CSINode to be created at first.
initialCSINodes []*storagev1.CSINode
// initialCSIDrivers are the list of CSIDriver to be created at first.
initialCSIDrivers []*storagev1.CSIDriver
// initialStorageCapacities are the list of CSIStorageCapacity to be created at first.
initialStorageCapacities []*storagev1.CSIStorageCapacity
// pods are the list of Pods to be created.
// All of them are expected to be unschedulable at first.
pods []*v1.Pod
@@ -1345,6 +1353,694 @@ func TestCoreResourceEnqueue(t *testing.T) {
wantRequeuedPods: sets.New("pod2"),
enableSchedulingQueueHint: []bool{true},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the Node is created",
initialNodes: []*v1.Node{
st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-east-1b").Obj(),
},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().
Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
NodeAffinityIn(v1.LabelTopologyZone, []string{"us-east-1a"}).
Obj(),
},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
Annotation(volume.AnnBindCompleted, "true").
VolumeName("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
node := st.MakeNode().Name("fake-node2").Label(v1.LabelTopologyZone, "us-east-1a").Obj()
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create node: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the Node is updated",
initialNodes: []*v1.Node{
st.MakeNode().
Name("fake-node").
Label("node", "fake-node").
Label("aaa", "bbb").
Obj(),
},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().
Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
NodeAffinityIn("aaa", []string{"ccc"}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj(),
},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
Annotation(volume.AnnBindCompleted, "true").
VolumeName("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
node := st.MakeNode().Name("fake-node").Label("node", "fake-node").Label("aaa", "ccc").Obj()
if _, err := testCtx.ClientSet.CoreV1().Nodes().Update(testCtx.Ctx, node, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update node: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.Node, ActionType: framework.UpdateNodeLabel}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the PV is created",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label("aaa", "bbb").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().
Name("pv1").
NodeAffinityIn("aaa", []string{"ccc"}).
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj(),
},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
VolumeName("pv1").
Annotation(volume.AnnBindCompleted, "true").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
if err := testCtx.ClientSet.CoreV1().PersistentVolumes().Delete(testCtx.Ctx, "pv1", metav1.DeleteOptions{}); err != nil {
return nil, fmt.Errorf("failed to delete pv1: %w", err)
}
pv1 := st.MakePersistentVolume().
Name("pv1").
NodeAffinityIn("aaa", []string{"bbb"}).
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Create(testCtx.Ctx, pv1, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create pv: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the PV is updated",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-east-1a").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().
Name("pv1").
NodeAffinityIn(v1.LabelFailureDomainBetaZone, []string{"us-east-1a"}).
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
VolumeName("pv1").
Annotation(volume.AnnBindCompleted, "true").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
pv1 := st.MakePersistentVolume().
Name("pv1").
NodeAffinityIn(v1.LabelTopologyZone, []string{"us-east-1a"}).
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumes().Update(testCtx.Ctx, pv1, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pv: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolume, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the PVC is created",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
st.MakePersistentVolumeClaim().
Name("pvc2").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
if err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Delete(testCtx.Ctx, "pvc1", metav1.DeleteOptions{}); err != nil {
return nil, fmt.Errorf("failed to delete pvc1: %w", err)
}
pvc1 := st.MakePersistentVolumeClaim().
Name("pvc1").
Annotation(volume.AnnBindCompleted, "true").
VolumeName("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj()
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Create(testCtx.Ctx, pvc1, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create pvc: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the PVC is updated",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
st.MakePersistentVolumeClaim().
Name("pvc2").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
st.MakePod().Name("pod2").Container("image").PVC("pvc2").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
pvc1 := st.MakePersistentVolumeClaim().
Name("pvc1").
VolumeName("pv1").
Annotation(volume.AnnBindCompleted, "true").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj()
if _, err := testCtx.ClientSet.CoreV1().PersistentVolumeClaims(testCtx.NS.Name).Update(testCtx.Ctx, pvc1, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update pvc: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.PersistentVolumeClaim, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the StorageClass is created",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
StorageClassName("sc1").
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
StorageClassName(ptr.To("sc1")).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
sc1 := st.MakeStorageClass().
Name("sc1").
VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
Provisioner("p").
Obj()
if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Create(testCtx.Ctx, sc1, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create storageclass: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the StorageClass's AllowedTopologies is updated",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west-1a").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().
Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
StorageClassName("sc1").
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
StorageClassName(ptr.To("sc1")).
Obj(),
},
initialStorageClasses: []*storagev1.StorageClass{
st.MakeStorageClass().
Name("sc1").
VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
Provisioner("p").
AllowedTopologies([]v1.TopologySelectorTerm{
{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{Key: v1.LabelTopologyZone, Values: []string{"us-west-1c"}},
},
},
}).Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
sc1 := st.MakeStorageClass().
Name("sc1").
VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
Provisioner("p").
AllowedTopologies([]v1.TopologySelectorTerm{
{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{Key: v1.LabelTopologyZone, Values: []string{"us-west-1a"}},
},
},
}).
Obj()
if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Update(testCtx.Ctx, sc1, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update storageclass: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is not requeued when the StorageClass is updated but the AllowedTopologies is same",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Label(v1.LabelTopologyZone, "us-west-1c").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().
Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
StorageClassName("sc1").
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
StorageClassName(ptr.To("sc1")).
Obj(),
},
initialStorageClasses: []*storagev1.StorageClass{
st.MakeStorageClass().
Name("sc1").
Label("key", "value").
VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
Provisioner("p").
AllowedTopologies([]v1.TopologySelectorTerm{
{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{Key: v1.LabelTopologyZone, Values: []string{"us-west-1a"}},
},
},
}).Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
sc1 := st.MakeStorageClass().
Name("sc1").
Label("key", "updated").
VolumeBindingMode(storagev1.VolumeBindingWaitForFirstConsumer).
Provisioner("p").
AllowedTopologies([]v1.TopologySelectorTerm{
{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{Key: v1.LabelTopologyZone, Values: []string{"us-west-1a"}},
},
},
}).
Obj()
if _, err := testCtx.ClientSet.StorageV1().StorageClasses().Update(testCtx.Ctx, sc1, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update storageclass: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.StorageClass, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
enableSchedulingQueueHint: []bool{true},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the CSINode is created",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().
Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
csinode1 := st.MakeCSINode().Name("fake-node").Obj()
if _, err := testCtx.ClientSet.StorageV1().CSINodes().Create(testCtx.Ctx, csinode1, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create CSINode: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.CSINode, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the CSINode's MigratedPluginsAnnotation is updated",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
StorageClassName("sc1").
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
StorageClassName(ptr.To("sc1")).
Obj(),
},
initialCSINodes: []*storagev1.CSINode{
st.MakeCSINode().Name("fake-node").Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
// A CSINode built with MakeCSINode cannot be used in an Update call because it has no resourceVersion, so fetch the live CSINode and modify that instead.
csinode, err := testCtx.ClientSet.StorageV1().CSINodes().Get(testCtx.Ctx, "fake-node", metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get CSINode: %w", err)
}
metav1.SetMetaDataAnnotation(&csinode.ObjectMeta, v1.MigratedPluginsAnnotationKey, "value")
if _, err := testCtx.ClientSet.StorageV1().CSINodes().Update(testCtx.Ctx, csinode, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update CSINode: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.CSINode, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the CSIDriver's StorageCapacity gets disabled",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
initialCSIDrivers: []*storagev1.CSIDriver{
st.MakeCSIDriver().Name("csidriver").StorageCapacity(ptr.To(true)).Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Volume(
v1.Volume{
Name: "volume",
VolumeSource: v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: "csidriver",
},
},
},
).Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
// A CSIDriver built with MakeCSIDriver cannot be used in an Update call because it has no resourceVersion, so fetch the live CSIDriver and modify that instead.
csidriver, err := testCtx.ClientSet.StorageV1().CSIDrivers().Get(testCtx.Ctx, "csidriver", metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get CSIDriver: %w", err)
}
csidriver.Spec.StorageCapacity = ptr.To(false)
if _, err := testCtx.ClientSet.StorageV1().CSIDrivers().Update(testCtx.Ctx, csidriver, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update CSIDriver: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.CSIDriver, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is not requeued when the CSIDriver is updated but the storage capacity is originally enabled",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
initialCSIDrivers: []*storagev1.CSIDriver{
st.MakeCSIDriver().Name("csidriver").StorageCapacity(ptr.To(false)).Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Volume(
v1.Volume{
Name: "volume",
VolumeSource: v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: "csidriver",
},
},
},
).Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
// A CSIDriver built with MakeCSIDriver cannot be used in an Update call because it has no resourceVersion, so fetch the live CSIDriver and modify that instead.
csidriver, err := testCtx.ClientSet.StorageV1().CSIDrivers().Get(testCtx.Ctx, "csidriver", metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get CSIDriver: %w", err)
}
csidriver.Spec.StorageCapacity = ptr.To(true)
if _, err := testCtx.ClientSet.StorageV1().CSIDrivers().Update(testCtx.Ctx, csidriver, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update CSIDriver: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.CSIDriver, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
enableSchedulingQueueHint: []bool{true},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the CSIStorageCapacity is created",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
StorageClassName("sc1").
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
StorageClassName(ptr.To("sc1")).
Obj(),
},
initialStorageClasses: []*storagev1.StorageClass{
st.MakeStorageClass().
Name("sc1").
VolumeBindingMode(storagev1.VolumeBindingImmediate).
Provisioner("p").
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
csc := st.MakeCSIStorageCapacity().Name("csc").StorageClassName("sc1").Capacity(resource.NewQuantity(10, resource.BinarySI)).Obj()
if _, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Create(testCtx.Ctx, csc, metav1.CreateOptions{}); err != nil {
return nil, fmt.Errorf("failed to create CSIStorageCapacity: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.CSIStorageCapacity, ActionType: framework.Add}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is requeued when the CSIStorageCapacity is increased",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
StorageClassName("sc1").
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
StorageClassName(ptr.To("sc1")).
Obj(),
},
initialStorageCapacities: []*storagev1.CSIStorageCapacity{
st.MakeCSIStorageCapacity().Name("csc").StorageClassName("sc1").Capacity(resource.NewQuantity(10, resource.BinarySI)).Obj(),
},
initialStorageClasses: []*storagev1.StorageClass{
st.MakeStorageClass().
Name("sc1").
VolumeBindingMode(storagev1.VolumeBindingImmediate).
Provisioner("p").
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
// A CSIStorageCapacity built with MakeCSIStorageCapacity cannot be used in an Update call because it has no resourceVersion, so fetch the live CSIStorageCapacity and modify that instead.
csc, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Get(testCtx.Ctx, "csc", metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get CSIStorageCapacity: %w", err)
}
csc.Capacity = resource.NewQuantity(20, resource.BinarySI)
if _, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Update(testCtx.Ctx, csc, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update CSIStorageCapacity: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.CSIStorageCapacity, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.New("pod1"),
enableSchedulingQueueHint: []bool{true, false},
},
{
name: "Pod rejected with node by the VolumeBinding plugin is not requeued when the CSIStorageCapacity is updated but the volumelimit is not increased",
initialNodes: []*v1.Node{st.MakeNode().Name("fake-node").Label("node", "fake-node").Obj()},
initialPVs: []*v1.PersistentVolume{
st.MakePersistentVolume().Name("pv1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Capacity(v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}).
HostPathVolumeSource(&v1.HostPathVolumeSource{Path: "/tmp", Type: ptr.To(v1.HostPathDirectoryOrCreate)}).
Obj()},
initialPVCs: []*v1.PersistentVolumeClaim{
st.MakePersistentVolumeClaim().
Name("pvc1").
AccessModes([]v1.PersistentVolumeAccessMode{v1.ReadWriteMany}).
Resources(v1.VolumeResourceRequirements{Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")}}).
Obj(),
},
initialStorageCapacities: []*storagev1.CSIStorageCapacity{
st.MakeCSIStorageCapacity().Name("csc").StorageClassName("sc1").Capacity(resource.NewQuantity(10, resource.BinarySI)).Obj(),
},
initialStorageClasses: []*storagev1.StorageClass{
st.MakeStorageClass().
Name("sc1").
VolumeBindingMode(storagev1.VolumeBindingImmediate).
Provisioner("p").
Obj(),
},
pods: []*v1.Pod{
st.MakePod().Name("pod1").Container("image").PVC("pvc1").Obj(),
},
triggerFn: func(testCtx *testutils.TestContext) (map[framework.ClusterEvent]uint64, error) {
// A CSIStorageCapacity built with MakeCSIStorageCapacity cannot be used in an Update call because it has no resourceVersion, so fetch the live CSIStorageCapacity and modify that instead.
csc, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Get(testCtx.Ctx, "csc", metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get CSIStorageCapacity: %w", err)
}
csc.Capacity = resource.NewQuantity(5, resource.BinarySI)
if _, err := testCtx.ClientSet.StorageV1().CSIStorageCapacities(testCtx.NS.Name).Update(testCtx.Ctx, csc, metav1.UpdateOptions{}); err != nil {
return nil, fmt.Errorf("failed to update CSIStorageCapacity: %w", err)
}
return map[framework.ClusterEvent]uint64{{Resource: framework.CSIStorageCapacity, ActionType: framework.Update}: 1}, nil
},
wantRequeuedPods: sets.Set[string]{},
enableSchedulingQueueHint: []bool{true},
},
}
for _, tt := range tests {
@@ -1379,6 +2075,30 @@ func TestCoreResourceEnqueue(t *testing.T) {
}
}
for _, csinode := range tt.initialCSINodes {
if _, err := testCtx.ClientSet.StorageV1().CSINodes().Create(ctx, csinode, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create a CSINode %q: %v", csinode.Name, err)
}
}
for _, csc := range tt.initialStorageCapacities {
if _, err := cs.StorageV1().CSIStorageCapacities(ns).Create(ctx, csc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create a CSIStorageCapacity %q: %v", csc.Name, err)
}
}
for _, csidriver := range tt.initialCSIDrivers {
if _, err := cs.StorageV1().CSIDrivers().Create(ctx, csidriver, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create a CSIDriver %q: %v", csidriver.Name, err)
}
}
for _, sc := range tt.initialStorageClasses {
if _, err := cs.StorageV1().StorageClasses().Create(testCtx.Ctx, sc, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create a StorageClass %q: %v", sc.Name, err)
}
}
for _, pv := range tt.initialPVs {
if _, err := testutils.CreatePV(cs, pv); err != nil {
t.Fatalf("Failed to create a PV %q: %v", pv.Name, err)