Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)

Merge pull request #103072 from ikeeip/scheduler_volumebinding

Remove "pkg/controller/volume/scheduling" dependency from "pkg/scheduler/framework/plugins"

Commit b6d83f0ba3
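In short, the hunks below move the volume-binding library (assume cache, binder, metrics, test helpers) into the scheduler plugin's own package, so the plugin stops importing pkg/controller/volume/scheduling and refers to the types directly. A rough sketch of the pattern of the rename, using identifiers that appear in the diff; this fragment is illustrative only and is not code from the PR:

    // Illustrative only: before the move, callers wrote scheduling.PodVolumes,
    // scheduling.StorageResource, scheduling.SchedulerVolumeBinder, etc.
    // After the move the same types are package-local in volumebinding:
    package volumebinding

    type exampleState struct {
        // was: map[string]*scheduling.PodVolumes
        podVolumesByNode map[string]*PodVolumes
    }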
@@ -1,9 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-- msau42
-- cofyc
-reviewers:
-- msau42
-- cofyc
-- lichuqiang
@@ -1,6 +1,11 @@
 # See the OWNERS docs at https://go.k8s.io/owners
 
+approvers:
+- sig-storage-approvers
+- cofyc
 reviewers:
 - sig-storage-reviewers
+- cofyc
+- lichuqiang
 labels:
 - sig/storage
@@ -14,11 +14,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduling
+package volumebinding
 
 import (
     "fmt"
-    storagehelpers "k8s.io/component-helpers/storage/volume"
     "strconv"
     "sync"
 
@@ -27,6 +26,7 @@ import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/meta"
     "k8s.io/client-go/tools/cache"
+    storagehelpers "k8s.io/component-helpers/storage/volume"
 )
 
 // AssumeCache is a cache on top of the informer that allows for updating
@@ -14,29 +14,17 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduling
+package volumebinding
 
 import (
     "fmt"
     "testing"
 
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 )
 
-func makePV(name, version, storageClass string) *v1.PersistentVolume {
-    return &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: name,
-            ResourceVersion: version,
-        },
-        Spec: v1.PersistentVolumeSpec{
-            StorageClassName: storageClass,
-        },
-    }
-}
-
 func verifyListPVs(t *testing.T, cache PVAssumeCache, expectedPVs map[string]*v1.PersistentVolume, storageClassName string) {
     pvList := cache.ListPVs(storageClassName)
     if len(pvList) != len(expectedPVs) {
@@ -71,38 +59,38 @@ func TestAssumePV(t *testing.T) {
         shouldSucceed bool
     }{
         "success-same-version": {
-            oldPV: makePV("pv1", "5", ""),
-            newPV: makePV("pv1", "5", ""),
+            oldPV: makePV("pv1", "").withVersion("5").PersistentVolume,
+            newPV: makePV("pv1", "").withVersion("5").PersistentVolume,
             shouldSucceed: true,
         },
         "success-storageclass-same-version": {
-            oldPV: makePV("pv1", "5", "class1"),
-            newPV: makePV("pv1", "5", "class1"),
+            oldPV: makePV("pv1", "class1").withVersion("5").PersistentVolume,
+            newPV: makePV("pv1", "class1").withVersion("5").PersistentVolume,
             shouldSucceed: true,
         },
         "success-new-higher-version": {
-            oldPV: makePV("pv1", "5", ""),
-            newPV: makePV("pv1", "6", ""),
+            oldPV: makePV("pv1", "").withVersion("5").PersistentVolume,
+            newPV: makePV("pv1", "").withVersion("6").PersistentVolume,
             shouldSucceed: true,
         },
         "fail-old-not-found": {
-            oldPV: makePV("pv2", "5", ""),
-            newPV: makePV("pv1", "5", ""),
+            oldPV: makePV("pv2", "").withVersion("5").PersistentVolume,
+            newPV: makePV("pv1", "").withVersion("5").PersistentVolume,
            shouldSucceed: false,
         },
         "fail-new-lower-version": {
-            oldPV: makePV("pv1", "5", ""),
-            newPV: makePV("pv1", "4", ""),
+            oldPV: makePV("pv1", "").withVersion("5").PersistentVolume,
+            newPV: makePV("pv1", "").withVersion("4").PersistentVolume,
             shouldSucceed: false,
         },
         "fail-new-bad-version": {
-            oldPV: makePV("pv1", "5", ""),
-            newPV: makePV("pv1", "a", ""),
+            oldPV: makePV("pv1", "").withVersion("5").PersistentVolume,
+            newPV: makePV("pv1", "").withVersion("a").PersistentVolume,
             shouldSucceed: false,
         },
         "fail-old-bad-version": {
-            oldPV: makePV("pv1", "a", ""),
-            newPV: makePV("pv1", "5", ""),
+            oldPV: makePV("pv1", "").withVersion("a").PersistentVolume,
+            newPV: makePV("pv1", "").withVersion("5").PersistentVolume,
             shouldSucceed: false,
         },
     }
@@ -148,8 +136,8 @@ func TestRestorePV(t *testing.T) {
         t.Fatalf("Failed to get internal cache")
     }
 
-    oldPV := makePV("pv1", "5", "")
-    newPV := makePV("pv1", "5", "")
+    oldPV := makePV("pv1", "").withVersion("5").PersistentVolume
+    newPV := makePV("pv1", "").withVersion("5").PersistentVolume
 
     // Restore PV that doesn't exist
     cache.Restore("nothing")
@@ -200,7 +188,7 @@ func TestBasicPVCache(t *testing.T) {
     // Add a bunch of PVs
     pvs := map[string]*v1.PersistentVolume{}
     for i := 0; i < 10; i++ {
-        pv := makePV(fmt.Sprintf("test-pv%v", i), "1", "")
+        pv := makePV(fmt.Sprintf("test-pv%v", i), "").withVersion("1").PersistentVolume
         pvs[pv.Name] = pv
         internalCache.add(pv)
     }
@@ -209,7 +197,7 @@ func TestBasicPVCache(t *testing.T) {
     verifyListPVs(t, cache, pvs, "")
 
     // Update a PV
-    updatedPV := makePV("test-pv3", "2", "")
+    updatedPV := makePV("test-pv3", "").withVersion("2").PersistentVolume
     pvs[updatedPV.Name] = updatedPV
     internalCache.update(nil, updatedPV)
 
@@ -235,7 +223,7 @@ func TestPVCacheWithStorageClasses(t *testing.T) {
     // Add a bunch of PVs
     pvs1 := map[string]*v1.PersistentVolume{}
     for i := 0; i < 10; i++ {
-        pv := makePV(fmt.Sprintf("test-pv%v", i), "1", "class1")
+        pv := makePV(fmt.Sprintf("test-pv%v", i), "class1").withVersion("1").PersistentVolume
         pvs1[pv.Name] = pv
         internalCache.add(pv)
     }
@@ -243,7 +231,7 @@ func TestPVCacheWithStorageClasses(t *testing.T) {
     // Add a bunch of PVs
     pvs2 := map[string]*v1.PersistentVolume{}
     for i := 0; i < 10; i++ {
-        pv := makePV(fmt.Sprintf("test2-pv%v", i), "1", "class2")
+        pv := makePV(fmt.Sprintf("test2-pv%v", i), "class2").withVersion("1").PersistentVolume
         pvs2[pv.Name] = pv
         internalCache.add(pv)
     }
@@ -253,7 +241,7 @@ func TestPVCacheWithStorageClasses(t *testing.T) {
     verifyListPVs(t, cache, pvs2, "class2")
 
     // Update a PV
-    updatedPV := makePV("test-pv3", "2", "class1")
+    updatedPV := makePV("test-pv3", "class1").withVersion("2").PersistentVolume
     pvs1[updatedPV.Name] = updatedPV
     internalCache.update(nil, updatedPV)
 
@@ -281,7 +269,7 @@ func TestAssumeUpdatePVCache(t *testing.T) {
     pvName := "test-pv0"
 
     // Add a PV
-    pv := makePV(pvName, "1", "")
+    pv := makePV(pvName, "").withVersion("1").PersistentVolume
     internalCache.add(pv)
     if err := verifyPV(cache, pvName, pv); err != nil {
         t.Fatalf("failed to get PV: %v", err)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduling
+package volumebinding
 
 import (
     "context"
@@ -46,8 +46,8 @@ import (
     "k8s.io/klog/v2"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
-    "k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics"
     "k8s.io/kubernetes/pkg/features"
+    "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduling
+package volumebinding
 
 import (
     "context"
@@ -105,11 +105,11 @@ var (
     topoMismatchClass = "topoMismatchClass"
 
     // nodes objects
-    node1 = makeNode("node1", map[string]string{nodeLabelKey: "node1"})
-    node2 = makeNode("node2", map[string]string{nodeLabelKey: "node2"})
-    node1NoLabels = makeNode("node1", nil)
-    node1Zone1 = makeNode("node1", map[string]string{"topology.gke.io/zone": "us-east-1"})
-    node1Zone2 = makeNode("node1", map[string]string{"topology.gke.io/zone": "us-east-2"})
+    node1 = makeNode("node1").withLabel(nodeLabelKey, "node1").Node
+    node2 = makeNode("node2").withLabel(nodeLabelKey, "node2").Node
+    node1NoLabels = makeNode("node1").Node
+    node1Zone1 = makeNode("node1").withLabel("topology.gke.io/zone", "us-east-1").Node
+    node1Zone2 = makeNode("node1").withLabel("topology.gke.io/zone", "us-east-2").Node
 
     // csiNode objects
     csiNode1Migrated = makeCSINode("node1", "kubernetes.io/gce-pd")
@@ -589,7 +589,11 @@ const (
 )
 
 func makeGenericEphemeralPVC(volumeName string, owned bool) *v1.PersistentVolumeClaim {
-    pod := makePodWithGenericEphemeral()
+    pod := makePod("test-pod").
+        withNamespace("testns").
+        withNodeName("node1").
+        withGenericEphemeralVolume("").Pod
+
     pvc := makeTestPVC(pod.Name+"-"+volumeName, "1G", "", pvcBound, "pv-bound", "1", &immediateClass)
     if owned {
         controller := true
@@ -712,15 +716,6 @@ func pvRemoveClaimUID(pv *v1.PersistentVolume) *v1.PersistentVolume {
     return newPV
 }
 
-func makeNode(name string, labels map[string]string) *v1.Node {
-    return &v1.Node{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: name,
-            Labels: labels,
-        },
-    }
-}
-
 func makeCSINode(name, migratedPlugin string) *storagev1.CSINode {
     return &storagev1.CSINode{
         ObjectMeta: metav1.ObjectMeta{
@@ -765,63 +760,6 @@ func makeCapacity(name, storageClassName string, node *v1.Node, capacityStr, max
     return c
 }
 
-func makePod(pvcs []*v1.PersistentVolumeClaim) *v1.Pod {
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "test-pod",
-            Namespace: "testns",
-        },
-    }
-
-    volumes := []v1.Volume{}
-    for i, pvc := range pvcs {
-        pvcVol := v1.Volume{
-            Name: fmt.Sprintf("vol%v", i),
-            VolumeSource: v1.VolumeSource{
-                PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-                    ClaimName: pvc.Name,
-                },
-            },
-        }
-        volumes = append(volumes, pvcVol)
-    }
-    pod.Spec.Volumes = volumes
-    pod.Spec.NodeName = "node1"
-    return pod
-}
-
-func makePodWithoutPVC() *v1.Pod {
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: "test-pod",
-            Namespace: "testns",
-        },
-        Spec: v1.PodSpec{
-            Volumes: []v1.Volume{
-                {
-                    VolumeSource: v1.VolumeSource{
-                        EmptyDir: &v1.EmptyDirVolumeSource{},
-                    },
-                },
-            },
-        },
-    }
-    return pod
-}
-
-func makePodWithGenericEphemeral(volumeNames ...string) *v1.Pod {
-    pod := makePod(nil)
-    for _, volumeName := range volumeNames {
-        pod.Spec.Volumes = append(pod.Spec.Volumes, v1.Volume{
-            Name: volumeName,
-            VolumeSource: v1.VolumeSource{
-                Ephemeral: &v1.EphemeralVolumeSource{},
-            },
-        })
-    }
-    return pod
-}
-
 func makeBinding(pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) *BindingInfo {
     return &BindingInfo{pvc: pvc.DeepCopy(), pv: pv.DeepCopy()}
 }
@@ -907,10 +845,15 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
     }
     scenarios := map[string]scenarioType{
         "no-volumes": {
-            pod: makePod(nil),
+            pod: makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").Pod,
         },
         "no-pvcs": {
-            pod: makePodWithoutPVC(),
+            pod: makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withEmptyDirVolume().Pod,
         },
         "pvc-not-found": {
             cachePVCs: []*v1.PersistentVolumeClaim{},
@@ -995,25 +938,37 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
             shouldFail: true,
         },
         "generic-ephemeral,no-pvc": {
-            pod: makePodWithGenericEphemeral("no-such-pvc"),
+            pod: makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withGenericEphemeralVolume("no-such-pvc").Pod,
             ephemeral: true,
             shouldFail: true,
         },
         "generic-ephemeral,with-pvc": {
-            pod: makePodWithGenericEphemeral("test-volume"),
+            pod: makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withGenericEphemeralVolume("test-volume").Pod,
             cachePVCs: []*v1.PersistentVolumeClaim{correctGenericPVC},
             pvs: []*v1.PersistentVolume{pvBoundGeneric},
             ephemeral: true,
         },
         "generic-ephemeral,wrong-pvc": {
-            pod: makePodWithGenericEphemeral("test-volume"),
+            pod: makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withGenericEphemeralVolume("test-volume").Pod,
             cachePVCs: []*v1.PersistentVolumeClaim{conflictingGenericPVC},
             pvs: []*v1.PersistentVolume{pvBoundGeneric},
             ephemeral: true,
             shouldFail: true,
         },
         "generic-ephemeral,disabled": {
-            pod: makePodWithGenericEphemeral("test-volume"),
+            pod: makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withGenericEphemeralVolume("test-volume").Pod,
             cachePVCs: []*v1.PersistentVolumeClaim{correctGenericPVC},
             pvs: []*v1.PersistentVolume{pvBoundGeneric},
             ephemeral: false,
@@ -1051,7 +1006,10 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
 
         // b. Generate pod with given claims
         if scenario.pod == nil {
-            scenario.pod = makePod(scenario.podPVCs)
+            scenario.pod = makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withPVCSVolume(scenario.podPVCs).Pod
         }
 
         // Execute
@@ -1179,7 +1137,10 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
 
         // b. Generate pod with given claims
         if scenario.pod == nil {
-            scenario.pod = makePod(scenario.podPVCs)
+            scenario.pod = makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withPVCSVolume(scenario.podPVCs).Pod
         }
 
         // Execute
@@ -1299,7 +1260,10 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
 
         // b. Generate pod with given claims
         if scenario.pod == nil {
-            scenario.pod = makePod(scenario.podPVCs)
+            scenario.pod = makePod("test-pod").
+                withNamespace("testns").
+                withNodeName("node1").
+                withPVCSVolume(scenario.podPVCs).Pod
         }
 
         // Execute
@@ -1392,7 +1356,10 @@ func TestAssumePodVolumes(t *testing.T) {
         // Setup
         testEnv := newTestBinder(t, ctx.Done())
         testEnv.initClaims(scenario.podPVCs, scenario.podPVCs)
-        pod := makePod(scenario.podPVCs)
+        pod := makePod("test-pod").
+            withNamespace("testns").
+            withNodeName("node1").
+            withPVCSVolume(scenario.podPVCs).Pod
         podVolumes := &PodVolumes{
             StaticBindings: scenario.bindings,
             DynamicProvisions: scenario.provisionedPVCs,
@@ -1445,7 +1412,10 @@ func TestRevertAssumedPodVolumes(t *testing.T) {
         // Setup
         testEnv := newTestBinder(t, ctx.Done())
         testEnv.initClaims(podPVCs, podPVCs)
-        pod := makePod(podPVCs)
+        pod := makePod("test-pod").
+            withNamespace("testns").
+            withNodeName("node1").
+            withPVCSVolume(podPVCs).Pod
         podVolumes := &PodVolumes{
             StaticBindings: bindings,
             DynamicProvisions: provisionedPVCs,
@@ -1562,7 +1532,9 @@ func TestBindAPIUpdate(t *testing.T) {
 
         // Setup
         testEnv := newTestBinder(t, ctx.Done())
-        pod := makePod(nil)
+        pod := makePod("test-pod").
+            withNamespace("testns").
+            withNodeName("node1").Pod
         if scenario.apiPVs == nil {
             scenario.apiPVs = scenario.cachedPVs
         }
@@ -1757,7 +1729,9 @@ func TestCheckBindings(t *testing.T) {
         defer cancel()
 
         // Setup
-        pod := makePod(nil)
+        pod := makePod("test-pod").
+            withNamespace("testns").
+            withNodeName("node1").Pod
         testEnv := newTestBinder(t, ctx.Done())
         testEnv.internalPodInformer.Informer().GetIndexer().Add(pod)
         testEnv.initNodes([]*v1.Node{node1})
@@ -1886,7 +1860,9 @@ func TestCheckBindingsWithCSIMigration(t *testing.T) {
         defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationGCE, scenario.migrationEnabled)()
 
         // Setup
-        pod := makePod(nil)
+        pod := makePod("test-pod").
+            withNamespace("testns").
+            withNodeName("node1").Pod
         testEnv := newTestBinder(t, ctx.Done())
         testEnv.internalPodInformer.Informer().GetIndexer().Add(pod)
         testEnv.initNodes(scenario.initNodes)
@@ -2070,7 +2046,9 @@ func TestBindPodVolumes(t *testing.T) {
         defer cancel()
 
         // Setup
-        pod := makePod(nil)
+        pod := makePod("test-pod").
+            withNamespace("testns").
+            withNodeName("node1").Pod
         testEnv := newTestBinder(t, ctx.Done())
         testEnv.internalPodInformer.Informer().GetIndexer().Add(pod)
         if scenario.nodes == nil {
@@ -2150,7 +2128,10 @@ func TestFindAssumeVolumes(t *testing.T) {
     testEnv := newTestBinder(t, ctx.Done())
     testEnv.initVolumes(pvs, pvs)
     testEnv.initClaims(podPVCs, podPVCs)
-    pod := makePod(podPVCs)
+    pod := makePod("test-pod").
+        withNamespace("testns").
+        withNodeName("node1").
+        withPVCSVolume(podPVCs).Pod
 
     testNode := &v1.Node{
         ObjectMeta: metav1.ObjectMeta{
@@ -2311,7 +2292,10 @@ func TestCapacity(t *testing.T) {
         testEnv.initClaims(scenario.pvcs, scenario.pvcs)
 
         // b. Generate pod with given claims
-        pod := makePod(scenario.pvcs)
+        pod := makePod("test-pod").
+            withNamespace("testns").
+            withNodeName("node1").
+            withPVCSVolume(scenario.pvcs).Pod
 
         // Execute
         podVolumes, reasons, err := findPodVolumes(testEnv.binder, pod, testNode)
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package scheduling
+package volumebinding
 
 import v1 "k8s.io/api/core/v1"
 
@@ -19,12 +19,11 @@ package volumebinding
 import (
     "math"
 
-    "k8s.io/kubernetes/pkg/controller/volume/scheduling"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 )
 
 // classResourceMap holds a map of storage class to resource.
-type classResourceMap map[string]*scheduling.StorageResource
+type classResourceMap map[string]*StorageResource
 
 // volumeCapacityScorer calculates the score based on class storage resource information.
 type volumeCapacityScorer func(classResourceMap) int64
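To make the scorer contract concrete: classResourceMap maps a storage-class name to the now package-local StorageResource (its Requested and Capacity fields appear in the test hunks below), and a volumeCapacityScorer reduces that map to a single int64 score. The sketch below is a hypothetical scorer written only to illustrate the signature; it is not the scoring function the plugin actually builds from its configured shape, which this diff does not show.

    // exampleScorer is a hypothetical volumeCapacityScorer: it averages the
    // free-capacity ratio (0-100) over all storage classes in the map.
    // Illustration only, not the plugin's real scoring logic.
    func exampleScorer(classes classResourceMap) int64 {
        var total, counted int64
        for _, r := range classes {
            if r == nil || r.Capacity == 0 {
                continue // skip classes with no reported capacity
            }
            total += 100 * (r.Capacity - r.Requested) / r.Capacity
            counted++
        }
        if counted == 0 {
            return 0
        }
        return total / counted
    }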
@@ -19,7 +19,6 @@ package volumebinding
 import (
     "testing"
 
-    "k8s.io/kubernetes/pkg/controller/volume/scheduling"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/framework"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
@@ -53,7 +52,7 @@ func TestScore(t *testing.T) {
       cases: []scoreCase{
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 0,
               Capacity: 100,
             },
@@ -62,7 +61,7 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
@@ -71,7 +70,7 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 50,
               Capacity: 100,
             },
@@ -80,7 +79,7 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 100,
               Capacity: 100,
             },
@@ -95,11 +94,11 @@ func TestScore(t *testing.T) {
       cases: []scoreCase{
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 0,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 0,
               Capacity: 100,
             },
@@ -108,11 +107,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 0,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
@@ -121,11 +120,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
@@ -134,11 +133,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 60,
               Capacity: 100,
             },
@@ -147,11 +146,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 50,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 50,
               Capacity: 100,
             },
@@ -160,11 +159,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 50,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 100,
               Capacity: 100,
             },
@@ -173,11 +172,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 100,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 100,
               Capacity: 100,
             },
@@ -205,11 +204,11 @@ func TestScore(t *testing.T) {
       cases: []scoreCase{
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 0,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 0,
               Capacity: 100,
             },
@@ -218,11 +217,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 0,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
@@ -231,11 +230,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
@@ -244,11 +243,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 30,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 60,
               Capacity: 100,
             },
@@ -257,11 +256,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 50,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 100,
               Capacity: 100,
             },
@@ -270,11 +269,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 90,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 90,
               Capacity: 100,
             },
@@ -283,11 +282,11 @@ func TestScore(t *testing.T) {
         },
         {
           classResourceMap{
-            classHDD: &scheduling.StorageResource{
+            classHDD: &StorageResource{
               Requested: 100,
               Capacity: 100,
             },
-            classSSD: &scheduling.StorageResource{
+            classSSD: &StorageResource{
               Requested: 100,
               Capacity: 100,
             },
pkg/scheduler/framework/plugins/volumebinding/test_utils.go (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package volumebinding

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
    "k8s.io/utils/pointer"
)

type nodeBuilder struct {
    *v1.Node
}

func makeNode(name string) nodeBuilder {
    return nodeBuilder{Node: &v1.Node{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                v1.LabelHostname: name,
            },
        },
    }}
}

func (nb nodeBuilder) withLabel(key, value string) nodeBuilder {
    if nb.Node.ObjectMeta.Labels == nil {
        nb.Node.ObjectMeta.Labels = map[string]string{}
    }
    nb.Node.ObjectMeta.Labels[key] = value
    return nb
}

type pvBuilder struct {
    *v1.PersistentVolume
}

func makePV(name, className string) pvBuilder {
    return pvBuilder{PersistentVolume: &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Spec: v1.PersistentVolumeSpec{
            StorageClassName: className,
        },
    }}
}

func (pvb pvBuilder) withNodeAffinity(keyValues map[string][]string) pvBuilder {
    matchExpressions := make([]v1.NodeSelectorRequirement, 0)
    for key, values := range keyValues {
        matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
            Key:      key,
            Operator: v1.NodeSelectorOpIn,
            Values:   values,
        })
    }
    pvb.PersistentVolume.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
        Required: &v1.NodeSelector{
            NodeSelectorTerms: []v1.NodeSelectorTerm{
                {
                    MatchExpressions: matchExpressions,
                },
            },
        },
    }
    return pvb
}

func (pvb pvBuilder) withVersion(version string) pvBuilder {
    pvb.PersistentVolume.ObjectMeta.ResourceVersion = version
    return pvb
}

func (pvb pvBuilder) withCapacity(capacity resource.Quantity) pvBuilder {
    pvb.PersistentVolume.Spec.Capacity = v1.ResourceList{
        v1.ResourceName(v1.ResourceStorage): capacity,
    }
    return pvb
}

func (pvb pvBuilder) withPhase(phase v1.PersistentVolumePhase) pvBuilder {
    pvb.PersistentVolume.Status = v1.PersistentVolumeStatus{
        Phase: phase,
    }
    return pvb
}

type pvcBuilder struct {
    *v1.PersistentVolumeClaim
}

func makePVC(name string, storageClassName string) pvcBuilder {
    return pvcBuilder{PersistentVolumeClaim: &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: v1.NamespaceDefault,
        },
        Spec: v1.PersistentVolumeClaimSpec{
            StorageClassName: pointer.StringPtr(storageClassName),
        },
    }}
}

func (pvcb pvcBuilder) withBoundPV(pvName string) pvcBuilder {
    pvcb.PersistentVolumeClaim.Spec.VolumeName = pvName
    metav1.SetMetaDataAnnotation(&pvcb.PersistentVolumeClaim.ObjectMeta, pvutil.AnnBindCompleted, "true")
    return pvcb
}

func (pvcb pvcBuilder) withRequestStorage(request resource.Quantity) pvcBuilder {
    pvcb.PersistentVolumeClaim.Spec.Resources = v1.ResourceRequirements{
        Requests: v1.ResourceList{
            v1.ResourceName(v1.ResourceStorage): request,
        },
    }
    return pvcb
}

type podBuilder struct {
    *v1.Pod
}

func makePod(name string) podBuilder {
    pb := podBuilder{Pod: &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      name,
            Namespace: v1.NamespaceDefault,
        },
    }}
    pb.Pod.Spec.Volumes = make([]v1.Volume, 0)
    return pb
}

func (pb podBuilder) withNodeName(name string) podBuilder {
    pb.Pod.Spec.NodeName = name
    return pb
}

func (pb podBuilder) withNamespace(name string) podBuilder {
    pb.Pod.ObjectMeta.Namespace = name
    return pb
}

func (pb podBuilder) withPVCVolume(pvcName, name string) podBuilder {
    pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{
        Name: name,
        VolumeSource: v1.VolumeSource{
            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
                ClaimName: pvcName,
            },
        },
    })
    return pb
}

func (pb podBuilder) withPVCSVolume(pvcs []*v1.PersistentVolumeClaim) podBuilder {
    for i, pvc := range pvcs {
        pb.withPVCVolume(pvc.Name, fmt.Sprintf("vol%v", i))
    }
    return pb
}

func (pb podBuilder) withEmptyDirVolume() podBuilder {
    pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{
        VolumeSource: v1.VolumeSource{
            EmptyDir: &v1.EmptyDirVolumeSource{},
        },
    })
    return pb
}

func (pb podBuilder) withGenericEphemeralVolume(name string) podBuilder {
    pb.Pod.Spec.Volumes = append(pb.Pod.Spec.Volumes, v1.Volume{
        Name: name,
        VolumeSource: v1.VolumeSource{
            Ephemeral: &v1.EphemeralVolumeSource{},
        },
    })
    return pb
}
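The remaining test hunks rewrite the fixtures in terms of these builders: each with* method mutates the embedded object and returns the builder, so chains end by reading the embedded field (.Pod, .Node, .PersistentVolume, .PersistentVolumeClaim). A rough usage sketch; the object names and the "wait-sc" class name are placeholders, but the call chains mirror ones that appear verbatim in the hunks below:

    pod := makePod("test-pod").
        withNamespace("testns").
        withNodeName("node1").
        withPVCVolume("pvc-a", "vol0").Pod

    pvc := makePVC("pvc-a", "wait-sc").withBoundPV("pv-a").PersistentVolumeClaim
    pv := makePV("pv-a", "wait-sc").withPhase(v1.VolumeAvailable).PersistentVolume
    node := makeNode("node-a").withLabel("topology.gke.io/zone", "us-east-1").Node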
@@ -29,7 +29,6 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     corelisters "k8s.io/client-go/listers/core/v1"
     "k8s.io/klog/v2"
-    "k8s.io/kubernetes/pkg/controller/volume/scheduling"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
@@ -58,7 +57,7 @@ type stateData struct {
     // podVolumesByNode holds the pod's volume information found in the Filter
     // phase for each node
     // it's initialized in the PreFilter phase
-    podVolumesByNode map[string]*scheduling.PodVolumes
+    podVolumesByNode map[string]*PodVolumes
     sync.Mutex
 }
 
@@ -70,7 +69,7 @@ func (d *stateData) Clone() framework.StateData {
 // In the Filter phase, pod binding cache is created for the pod and used in
 // Reserve and PreBind phases.
 type VolumeBinding struct {
-    Binder scheduling.SchedulerVolumeBinder
+    Binder SchedulerVolumeBinder
     PVCLister corelisters.PersistentVolumeClaimLister
     GenericEphemeralVolumeFeatureEnabled bool
     scorer volumeCapacityScorer
@@ -179,7 +178,7 @@ func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleSt
         status.AppendReason("pod has unbound immediate PersistentVolumeClaims")
         return status
     }
-    state.Write(stateKey, &stateData{boundClaims: boundClaims, claimsToBind: claimsToBind, podVolumesByNode: make(map[string]*scheduling.PodVolumes)})
+    state.Write(stateKey, &stateData{boundClaims: boundClaims, claimsToBind: claimsToBind, podVolumesByNode: make(map[string]*PodVolumes)})
     return nil
 }
 
@@ -270,7 +269,7 @@ func (pl *VolumeBinding) Score(ctx context.Context, cs *framework.CycleState, po
         class := staticBinding.StorageClassName()
         storageResource := staticBinding.StorageResource()
         if _, ok := classResources[class]; !ok {
-            classResources[class] = &scheduling.StorageResource{
+            classResources[class] = &StorageResource{
                 Requested: 0,
                 Capacity: 0,
             }
@@ -367,14 +366,14 @@ func New(plArgs runtime.Object, fh framework.Handle) (framework.Plugin, error) {
     pvInformer := fh.SharedInformerFactory().Core().V1().PersistentVolumes()
     storageClassInformer := fh.SharedInformerFactory().Storage().V1().StorageClasses()
     csiNodeInformer := fh.SharedInformerFactory().Storage().V1().CSINodes()
-    var capacityCheck *scheduling.CapacityCheck
+    var capacityCheck *CapacityCheck
     if utilfeature.DefaultFeatureGate.Enabled(features.CSIStorageCapacity) {
-        capacityCheck = &scheduling.CapacityCheck{
+        capacityCheck = &CapacityCheck{
             CSIDriverInformer:          fh.SharedInformerFactory().Storage().V1().CSIDrivers(),
             CSIStorageCapacityInformer: fh.SharedInformerFactory().Storage().V1beta1().CSIStorageCapacities(),
         }
     }
-    binder := scheduling.NewVolumeBinder(fh.ClientSet(), podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second)
+    binder := NewVolumeBinder(fh.ClientSet(), podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second)
 
     // build score function
     var scorer volumeCapacityScorer
@@ -32,13 +32,10 @@ import (
     "k8s.io/client-go/kubernetes/fake"
     "k8s.io/component-base/featuregate"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
-    pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
-    "k8s.io/kubernetes/pkg/controller/volume/scheduling"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/framework"
     "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
-    "k8s.io/utils/pointer"
 )
 
 var (
@@ -75,112 +72,6 @@ var (
     }
 )
 
-func makeNode(name string) *v1.Node {
-    return &v1.Node{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: name,
-            Labels: map[string]string{
-                v1.LabelHostname: name,
-            },
-        },
-    }
-}
-
-func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {
-    for k, v := range labels {
-        node.Labels[k] = v
-    }
-    return node
-}
-
-func makePV(name string, className string) *v1.PersistentVolume {
-    return &v1.PersistentVolume{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: name,
-        },
-        Spec: v1.PersistentVolumeSpec{
-            StorageClassName: className,
-        },
-        Status: v1.PersistentVolumeStatus{
-            Phase: v1.VolumeAvailable,
-        },
-    }
-}
-
-func setPVNodeAffinity(pv *v1.PersistentVolume, keyValues map[string][]string) *v1.PersistentVolume {
-    matchExpressions := make([]v1.NodeSelectorRequirement, 0)
-    for key, values := range keyValues {
-        matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
-            Key:      key,
-            Operator: v1.NodeSelectorOpIn,
-            Values:   values,
-        })
-    }
-    pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
-        Required: &v1.NodeSelector{
-            NodeSelectorTerms: []v1.NodeSelectorTerm{
-                {
-                    MatchExpressions: matchExpressions,
-                },
-            },
-        },
-    }
-    return pv
-}
-
-func setPVCapacity(pv *v1.PersistentVolume, capacity resource.Quantity) *v1.PersistentVolume {
-    pv.Spec.Capacity = v1.ResourceList{
-        v1.ResourceName(v1.ResourceStorage): capacity,
-    }
-    return pv
-}
-
-func makePVC(name string, boundPVName string, storageClassName string) *v1.PersistentVolumeClaim {
-    pvc := &v1.PersistentVolumeClaim{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      name,
-            Namespace: v1.NamespaceDefault,
-        },
-        Spec: v1.PersistentVolumeClaimSpec{
-            StorageClassName: pointer.StringPtr(storageClassName),
-        },
-    }
-    if boundPVName != "" {
-        pvc.Spec.VolumeName = boundPVName
-        metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, pvutil.AnnBindCompleted, "true")
-    }
-    return pvc
-}
-
-func setPVCRequestStorage(pvc *v1.PersistentVolumeClaim, request resource.Quantity) *v1.PersistentVolumeClaim {
-    pvc.Spec.Resources = v1.ResourceRequirements{
-        Requests: v1.ResourceList{
-            v1.ResourceName(v1.ResourceStorage): request,
-        },
-    }
-    return pvc
-}
-
-func makePod(name string, pvcNames []string) *v1.Pod {
-    p := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      name,
-            Namespace: v1.NamespaceDefault,
-        },
-    }
-    p.Spec.Volumes = make([]v1.Volume, 0)
-    for _, pvcName := range pvcNames {
-        p.Spec.Volumes = append(p.Spec.Volumes, v1.Volume{
-            VolumeSource: v1.VolumeSource{
-                PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
-                    ClaimName: pvcName,
-                },
-            },
-        })
-    }
-    return p
-}
-
 func TestVolumeBinding(t *testing.T) {
     table := []struct {
         name string
@@ -197,9 +88,9 @@ func TestVolumeBinding(t *testing.T) {
     }{
         {
             name: "pod has not pvcs",
-            pod: makePod("pod-a", nil),
+            pod: makePod("pod-a").Pod,
             nodes: []*v1.Node{
-                makeNode("node-a"),
+                makeNode("node-a").Node,
             },
             wantStateAfterPreFilter: &stateData{
                 skip: true,
@@ -213,22 +104,22 @@ func TestVolumeBinding(t *testing.T) {
         },
         {
             name: "all bound",
-            pod: makePod("pod-a", []string{"pvc-a"}),
+            pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
             nodes: []*v1.Node{
-                makeNode("node-a"),
+                makeNode("node-a").Node,
             },
             pvcs: []*v1.PersistentVolumeClaim{
-                makePVC("pvc-a", "pv-a", waitSC.Name),
+                makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
             },
             pvs: []*v1.PersistentVolume{
-                makePV("pv-a", waitSC.Name),
+                makePV("pv-a", waitSC.Name).withPhase(v1.VolumeAvailable).PersistentVolume,
             },
             wantStateAfterPreFilter: &stateData{
                 boundClaims: []*v1.PersistentVolumeClaim{
-                    makePVC("pvc-a", "pv-a", waitSC.Name),
+                    makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
                 },
                 claimsToBind: []*v1.PersistentVolumeClaim{},
-                podVolumesByNode: map[string]*scheduling.PodVolumes{},
+                podVolumesByNode: map[string]*PodVolumes{},
             },
             wantFilterStatus: []*framework.Status{
                 nil,
@@ -239,9 +130,9 @@ func TestVolumeBinding(t *testing.T) {
         },
         {
             name: "PVC does not exist",
-            pod: makePod("pod-a", []string{"pvc-a"}),
+            pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
             nodes: []*v1.Node{
-                makeNode("node-a"),
+                makeNode("node-a").Node,
             },
             pvcs: []*v1.PersistentVolumeClaim{},
             wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-a" not found`),
@@ -254,12 +145,12 @@ func TestVolumeBinding(t *testing.T) {
         },
         {
             name: "Part of PVCs do not exist",
-            pod: makePod("pod-a", []string{"pvc-a", "pvc-b"}),
+            pod: makePod("pod-a").withPVCVolume("pvc-a", "").withPVCVolume("pvc-b", "").Pod,
             nodes: []*v1.Node{
-                makeNode("node-a"),
+                makeNode("node-a").Node,
             },
             pvcs: []*v1.PersistentVolumeClaim{
-                makePVC("pvc-a", "pv-a", waitSC.Name),
+                makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
             },
             wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-b" not found`),
             wantFilterStatus: []*framework.Status{
@@ -271,12 +162,12 @@ func TestVolumeBinding(t *testing.T) {
         },
         {
             name: "immediate claims not bound",
-            pod: makePod("pod-a", []string{"pvc-a"}),
+            pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
             nodes: []*v1.Node{
-                makeNode("node-a"),
+                makeNode("node-a").Node,
             },
             pvcs: []*v1.PersistentVolumeClaim{
-                makePVC("pvc-a", "", immediateSC.Name),
+                makePVC("pvc-a", immediateSC.Name).PersistentVolumeClaim,
             },
             wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "pod has unbound immediate PersistentVolumeClaims"),
             wantFilterStatus: []*framework.Status{
@@ -288,22 +179,22 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "unbound claims no matches",
-pod: makePod("pod-a", []string{"pvc-a"}),
+pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
nodes: []*v1.Node{
-makeNode("node-a"),
+makeNode("node-a").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
-makePVC("pvc-a", "", waitSC.Name),
+makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
},
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
-makePVC("pvc-a", "", waitSC.Name),
+makePVC("pvc-a", waitSC.Name).PersistentVolumeClaim,
},
-podVolumesByNode: map[string]*scheduling.PodVolumes{},
+podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
-framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict)),
+framework.NewStatus(framework.UnschedulableAndUnresolvable, string(ErrReasonBindConflict)),
},
wantScores: []int64{
0,
@@ -311,32 +202,30 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "bound and unbound unsatisfied",
-pod: makePod("pod-a", []string{"pvc-a", "pvc-b"}),
+pod: makePod("pod-a").withPVCVolume("pvc-a", "").withPVCVolume("pvc-b", "").Pod,
nodes: []*v1.Node{
-mergeNodeLabels(makeNode("node-a"), map[string]string{
-"foo": "barbar",
-}),
+makeNode("node-a").withLabel("foo", "barbar").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
-makePVC("pvc-a", "pv-a", waitSC.Name),
-makePVC("pvc-b", "", waitSC.Name),
+makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
+makePVC("pvc-b", waitSC.Name).PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
-setPVNodeAffinity(makePV("pv-a", waitSC.Name), map[string][]string{
-"foo": {"bar"},
-}),
+makePV("pv-a", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withNodeAffinity(map[string][]string{"foo": {"bar"}}).PersistentVolume,
},
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{
-makePVC("pvc-a", "pv-a", waitSC.Name),
+makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
},
claimsToBind: []*v1.PersistentVolumeClaim{
-makePVC("pvc-b", "", waitSC.Name),
+makePVC("pvc-b", waitSC.Name).PersistentVolumeClaim,
},
-podVolumesByNode: map[string]*scheduling.PodVolumes{},
+podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
-framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonNodeConflict), string(scheduling.ErrReasonBindConflict)),
+framework.NewStatus(framework.UnschedulableAndUnresolvable, string(ErrReasonNodeConflict), string(ErrReasonBindConflict)),
},
wantScores: []int64{
0,
@@ -344,9 +233,9 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "pvc not found",
-pod: makePod("pod-a", []string{"pvc-a"}),
+pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
nodes: []*v1.Node{
-makeNode("node-a"),
+makeNode("node-a").Node,
},
wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, `persistentvolumeclaim "pvc-a" not found`),
wantFilterStatus: []*framework.Status{
@@ -358,20 +247,20 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "pv not found",
-pod: makePod("pod-a", []string{"pvc-a"}),
+pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
nodes: []*v1.Node{
-makeNode("node-a"),
+makeNode("node-a").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
-makePVC("pvc-a", "pv-a", waitSC.Name),
+makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{
-makePVC("pvc-a", "pv-a", waitSC.Name),
+makePVC("pvc-a", waitSC.Name).withBoundPV("pv-a").PersistentVolumeClaim,
},
claimsToBind: []*v1.PersistentVolumeClaim{},
-podVolumesByNode: map[string]*scheduling.PodVolumes{},
+podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
framework.NewStatus(framework.UnschedulableAndUnresolvable, `pvc(s) bound to non-existent pv(s)`),
@@ -382,29 +271,41 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "local volumes with close capacity are preferred",
-pod: makePod("pod-a", []string{"pvc-a"}),
+pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
nodes: []*v1.Node{
-makeNode("node-a"),
-makeNode("node-b"),
-makeNode("node-c"),
+makeNode("node-a").Node,
+makeNode("node-b").Node,
+makeNode("node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
+makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-0", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-1", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-0", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-1", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
+makePV("pv-a-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+makePV("pv-a-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+makePV("pv-b-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+makePV("pv-b-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
},
feature: features.VolumeCapacityPriority,
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
+makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
-podVolumesByNode: map[string]*scheduling.PodVolumes{},
+podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
@@ -419,35 +320,59 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "local volumes with close capacity are preferred (multiple pvcs)",
-pod: makePod("pod-a", []string{"pvc-0", "pvc-1"}),
+pod: makePod("pod-a").withPVCVolume("pvc-0", "").withPVCVolume("pvc-1", "").Pod,
nodes: []*v1.Node{
-makeNode("node-a"),
-makeNode("node-b"),
-makeNode("node-c"),
+makeNode("node-a").Node,
+makeNode("node-b").Node,
+makeNode("node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-0", "", waitSC.Name), resource.MustParse("50Gi")),
-setPVCRequestStorage(makePVC("pvc-1", "", waitHDDSC.Name), resource.MustParse("100Gi")),
+makePVC("pvc-0", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+makePVC("pvc-1", waitHDDSC.Name).withRequestStorage(resource.MustParse("100Gi")).PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-0", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-1", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-2", waitHDDSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-3", waitHDDSC.Name), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-a"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-0", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-1", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-2", waitHDDSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-3", waitHDDSC.Name), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-b"}}),
+makePV("pv-a-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+makePV("pv-a-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+makePV("pv-a-2", waitHDDSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+makePV("pv-a-3", waitHDDSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
+makePV("pv-b-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+makePV("pv-b-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+makePV("pv-b-2", waitHDDSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
+makePV("pv-b-3", waitHDDSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
},
feature: features.VolumeCapacityPriority,
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-0", "", waitSC.Name), resource.MustParse("50Gi")),
-setPVCRequestStorage(makePVC("pvc-1", "", waitHDDSC.Name), resource.MustParse("100Gi")),
+makePVC("pvc-0", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
+makePVC("pvc-1", waitHDDSC.Name).withRequestStorage(resource.MustParse("100Gi")).PersistentVolumeClaim,
},
-podVolumesByNode: map[string]*scheduling.PodVolumes{},
+podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
@@ -462,62 +387,68 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "zonal volumes with close capacity are preferred",
-pod: makePod("pod-a", []string{"pvc-a"}),
+pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
nodes: []*v1.Node{
-mergeNodeLabels(makeNode("zone-a-node-a"), map[string]string{
-"topology.kubernetes.io/region": "region-a",
-"topology.kubernetes.io/zone": "zone-a",
-}),
-mergeNodeLabels(makeNode("zone-a-node-b"), map[string]string{
-"topology.kubernetes.io/region": "region-a",
-"topology.kubernetes.io/zone": "zone-a",
-}),
-mergeNodeLabels(makeNode("zone-b-node-a"), map[string]string{
-"topology.kubernetes.io/region": "region-b",
-"topology.kubernetes.io/zone": "zone-b",
-}),
-mergeNodeLabels(makeNode("zone-b-node-b"), map[string]string{
-"topology.kubernetes.io/region": "region-b",
-"topology.kubernetes.io/zone": "zone-b",
-}),
-mergeNodeLabels(makeNode("zone-c-node-a"), map[string]string{
-"topology.kubernetes.io/region": "region-c",
-"topology.kubernetes.io/zone": "zone-c",
-}),
-mergeNodeLabels(makeNode("zone-c-node-b"), map[string]string{
-"topology.kubernetes.io/region": "region-c",
-"topology.kubernetes.io/zone": "zone-c",
-}),
+makeNode("zone-a-node-a").
+withLabel("topology.kubernetes.io/region", "region-a").
+withLabel("topology.kubernetes.io/zone", "zone-a").Node,
+makeNode("zone-a-node-b").
+withLabel("topology.kubernetes.io/region", "region-a").
+withLabel("topology.kubernetes.io/zone", "zone-a").Node,
+makeNode("zone-b-node-a").
+withLabel("topology.kubernetes.io/region", "region-b").
+withLabel("topology.kubernetes.io/zone", "zone-b").Node,
+makeNode("zone-b-node-b").
+withLabel("topology.kubernetes.io/region", "region-b").
+withLabel("topology.kubernetes.io/zone", "zone-b").Node,
+makeNode("zone-c-node-a").
+withLabel("topology.kubernetes.io/region", "region-c").
+withLabel("topology.kubernetes.io/zone", "zone-c").Node,
+makeNode("zone-c-node-b").
+withLabel("topology.kubernetes.io/region", "region-c").
+withLabel("topology.kubernetes.io/zone", "zone-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
+makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-0", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-a"},
-"topology.kubernetes.io/zone": {"zone-a"},
-}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-1", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-a"},
-"topology.kubernetes.io/zone": {"zone-a"},
-}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-0", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-b"},
-"topology.kubernetes.io/zone": {"zone-b"},
-}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-1", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-b"},
-"topology.kubernetes.io/zone": {"zone-b"},
-}),
+makePV("pv-a-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-a"},
+"topology.kubernetes.io/zone": {"zone-a"},
+}).PersistentVolume,
+makePV("pv-a-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-a"},
+"topology.kubernetes.io/zone": {"zone-a"},
+}).PersistentVolume,
+makePV("pv-b-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-b"},
+"topology.kubernetes.io/zone": {"zone-b"},
+}).PersistentVolume,
+makePV("pv-b-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-b"},
+"topology.kubernetes.io/zone": {"zone-b"},
+}).PersistentVolume,
},
feature: features.VolumeCapacityPriority,
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
+makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
-podVolumesByNode: map[string]*scheduling.PodVolumes{},
+podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
@@ -538,53 +469,59 @@ func TestVolumeBinding(t *testing.T) {
},
{
name: "zonal volumes with close capacity are preferred (custom shape)",
-pod: makePod("pod-a", []string{"pvc-a"}),
+pod: makePod("pod-a").withPVCVolume("pvc-a", "").Pod,
nodes: []*v1.Node{
-mergeNodeLabels(makeNode("zone-a-node-a"), map[string]string{
-"topology.kubernetes.io/region": "region-a",
-"topology.kubernetes.io/zone": "zone-a",
-}),
-mergeNodeLabels(makeNode("zone-a-node-b"), map[string]string{
-"topology.kubernetes.io/region": "region-a",
-"topology.kubernetes.io/zone": "zone-a",
-}),
-mergeNodeLabels(makeNode("zone-b-node-a"), map[string]string{
-"topology.kubernetes.io/region": "region-b",
-"topology.kubernetes.io/zone": "zone-b",
-}),
-mergeNodeLabels(makeNode("zone-b-node-b"), map[string]string{
-"topology.kubernetes.io/region": "region-b",
-"topology.kubernetes.io/zone": "zone-b",
-}),
-mergeNodeLabels(makeNode("zone-c-node-a"), map[string]string{
-"topology.kubernetes.io/region": "region-c",
-"topology.kubernetes.io/zone": "zone-c",
-}),
-mergeNodeLabels(makeNode("zone-c-node-b"), map[string]string{
-"topology.kubernetes.io/region": "region-c",
-"topology.kubernetes.io/zone": "zone-c",
-}),
+makeNode("zone-a-node-a").
+withLabel("topology.kubernetes.io/region", "region-a").
+withLabel("topology.kubernetes.io/zone", "zone-a").Node,
+makeNode("zone-a-node-b").
+withLabel("topology.kubernetes.io/region", "region-a").
+withLabel("topology.kubernetes.io/zone", "zone-a").Node,
+makeNode("zone-b-node-a").
+withLabel("topology.kubernetes.io/region", "region-b").
+withLabel("topology.kubernetes.io/zone", "zone-b").Node,
+makeNode("zone-b-node-b").
+withLabel("topology.kubernetes.io/region", "region-b").
+withLabel("topology.kubernetes.io/zone", "zone-b").Node,
+makeNode("zone-c-node-a").
+withLabel("topology.kubernetes.io/region", "region-c").
+withLabel("topology.kubernetes.io/zone", "zone-c").Node,
+makeNode("zone-c-node-b").
+withLabel("topology.kubernetes.io/region", "region-c").
+withLabel("topology.kubernetes.io/zone", "zone-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
+makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-0", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-a"},
-"topology.kubernetes.io/zone": {"zone-a"},
-}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-a-1", waitSC.Name), resource.MustParse("200Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-a"},
-"topology.kubernetes.io/zone": {"zone-a"},
-}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-0", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-b"},
-"topology.kubernetes.io/zone": {"zone-b"},
-}),
-setPVNodeAffinity(setPVCapacity(makePV("pv-b-1", waitSC.Name), resource.MustParse("100Gi")), map[string][]string{
-"topology.kubernetes.io/region": {"region-b"},
-"topology.kubernetes.io/zone": {"zone-b"},
-}),
+makePV("pv-a-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-a"},
+"topology.kubernetes.io/zone": {"zone-a"},
+}).PersistentVolume,
+makePV("pv-a-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("200Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-a"},
+"topology.kubernetes.io/zone": {"zone-a"},
+}).PersistentVolume,
+makePV("pv-b-0", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-b"},
+"topology.kubernetes.io/zone": {"zone-b"},
+}).PersistentVolume,
+makePV("pv-b-1", waitSC.Name).
+withPhase(v1.VolumeAvailable).
+withCapacity(resource.MustParse("100Gi")).
+withNodeAffinity(map[string][]string{
+"topology.kubernetes.io/region": {"region-b"},
+"topology.kubernetes.io/zone": {"zone-b"},
+}).PersistentVolume,
},
feature: features.VolumeCapacityPriority,
args: &config.VolumeBindingArgs{
@@ -608,9 +545,9 @@ func TestVolumeBinding(t *testing.T) {
wantStateAfterPreFilter: &stateData{
boundClaims: []*v1.PersistentVolumeClaim{},
claimsToBind: []*v1.PersistentVolumeClaim{
-setPVCRequestStorage(makePVC("pvc-a", "", waitSC.Name), resource.MustParse("50Gi")),
+makePVC("pvc-a", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
-podVolumesByNode: map[string]*scheduling.PodVolumes{},
+podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
@@ -30,7 +30,6 @@ import (
"k8s.io/client-go/informers"
clientsetfake "k8s.io/client-go/kubernetes/fake"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
-volumescheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
@@ -41,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/queuesort"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
+"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@@ -135,8 +135,8 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
nodesStatuses: framework.NodeToStatusMap{
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumezone.ErrReasonConflict),
-"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonNodeConflict)),
-"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumescheduling.ErrReasonBindConflict)),
+"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumebinding.ErrReasonNodeConflict)),
+"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, string(volumebinding.ErrReasonBindConflict)),
},
expected: sets.NewString("node4"),
},
@@ -22,7 +22,7 @@ import (

"k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/legacyregistry"
-volumeschedulingmetrics "k8s.io/kubernetes/pkg/controller/volume/scheduling/metrics"
+volumebindingmetrics "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding/metrics"
)

const (
@@ -189,7 +189,7 @@ func Register() {
// Register the metrics.
registerMetrics.Do(func() {
RegisterMetrics(metricsList...)
-volumeschedulingmetrics.RegisterVolumeSchedulingMetrics()
+volumebindingmetrics.RegisterVolumeSchedulingMetrics()
})
}
@@ -47,7 +47,6 @@ import (
clienttesting "k8s.io/client-go/testing"
clientcache "k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/events"
-"k8s.io/kubernetes/pkg/controller/volume/scheduling"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
@@ -954,7 +953,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
return sched, bindingChan, errChan
}

-func setupTestSchedulerWithVolumeBinding(volumeBinder scheduling.SchedulerVolumeBinder, stop <-chan struct{}, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
+func setupTestSchedulerWithVolumeBinding(volumeBinder volumebinding.SchedulerVolumeBinder, stop <-chan struct{}, broadcaster events.EventBroadcaster) (*Scheduler, chan *v1.Binding, chan error) {
testNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", UID: types.UID("machine1")}}
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
pod := podWithID("foo", "")
@@ -1009,11 +1008,11 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
expectAssumeCalled bool
expectBindCalled bool
eventReason string
-volumeBinderConfig *scheduling.FakeVolumeBinderConfig
+volumeBinderConfig *volumebinding.FakeVolumeBinderConfig
}{
{
name: "all bound",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
AllBound: true,
},
expectAssumeCalled: true,
@@ -1022,32 +1021,32 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
},
{
name: "bound/invalid pv affinity",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
AllBound: true,
-FindReasons: scheduling.ConflictReasons{scheduling.ErrReasonNodeConflict},
+FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonNodeConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) had volume node affinity conflict"),
},
{
name: "unbound/no matches",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
-FindReasons: scheduling.ConflictReasons{scheduling.ErrReasonBindConflict},
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
+FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonBindConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind"),
},
{
name: "bound and unbound unsatisfied",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
-FindReasons: scheduling.ConflictReasons{scheduling.ErrReasonBindConflict, scheduling.ErrReasonNodeConflict},
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
+FindReasons: volumebinding.ConflictReasons{volumebinding.ErrReasonBindConflict, volumebinding.ErrReasonNodeConflict},
},
eventReason: "FailedScheduling",
expectError: makePredicateError("1 node(s) didn't find available persistent volumes to bind, 1 node(s) had volume node affinity conflict"),
},
{
name: "unbound/found matches/bind succeeds",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{},
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{},
expectAssumeCalled: true,
expectBindCalled: true,
expectPodBind: &v1.Binding{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "foo-ns", UID: types.UID("foo")}, Target: v1.ObjectReference{Kind: "Node", Name: "machine1"}},
@@ -1055,7 +1054,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
},
{
name: "predicate error",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
FindErr: findErr,
},
eventReason: "FailedScheduling",
@@ -1063,7 +1062,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
},
{
name: "assume error",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
AssumeErr: assumeErr,
},
expectAssumeCalled: true,
@@ -1072,7 +1071,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
},
{
name: "bind error",
-volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
+volumeBinderConfig: &volumebinding.FakeVolumeBinderConfig{
BindErr: bindErr,
},
expectAssumeCalled: true,
@@ -1085,7 +1084,7 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {
for _, item := range table {
t.Run(item.name, func(t *testing.T) {
stop := make(chan struct{})
-fakeVolumeBinder := scheduling.NewFakeVolumeBinder(item.volumeBinderConfig)
+fakeVolumeBinder := volumebinding.NewFakeVolumeBinder(item.volumeBinderConfig)
s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(fakeVolumeBinder, stop, eventBroadcaster)
eventChan := make(chan struct{})
stopFunc := eventBroadcaster.StartEventWatcher(func(obj runtime.Object) {
@@ -37,7 +37,6 @@ rules:
- k8s.io/kubernetes/pkg/controller/service
- k8s.io/kubernetes/pkg/controller/util/node
- k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util
-- k8s.io/kubernetes/pkg/controller/volume/scheduling
- k8s.io/kubernetes/pkg/credentialprovider
- k8s.io/kubernetes/pkg/credentialprovider/aws
- k8s.io/kubernetes/pkg/credentialprovider/azure
@@ -44,7 +44,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
cachetools "k8s.io/client-go/tools/cache"
watchtools "k8s.io/client-go/tools/watch"
-"k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
@@ -1883,11 +1882,12 @@ const (
pvcAsSourceProtectionFinalizer = "snapshot.storage.kubernetes.io/pvc-as-source-protection"
volumeSnapshotContentFinalizer = "snapshot.storage.kubernetes.io/volumesnapshotcontent-bound-protection"
volumeSnapshotBoundFinalizer = "snapshot.storage.kubernetes.io/volumesnapshot-bound-protection"
+errReasonNotEnoughSpace = "node(s) did not have enough free storage"
)

var (
errPodCompleted = fmt.Errorf("pod ran to completion")
-errNotEnoughSpace = errors.New(scheduling.ErrReasonNotEnoughSpace)
+errNotEnoughSpace = errors.New(errReasonNotEnoughSpace)
)

func podHasStorage(ctx context.Context, c clientset.Interface, podName, namespace string, when time.Time) wait.ConditionFunc {
@@ -1911,7 +1911,7 @@ func podHasStorage(ctx context.Context, c clientset.Interface, podName, namespac
}
for _, event := range events.Items {
if /* event.CreationTimestamp.After(when) &&
-*/strings.Contains(event.Message, scheduling.ErrReasonNotEnoughSpace) {
+*/strings.Contains(event.Message, errReasonNotEnoughSpace) {
return false, errNotEnoughSpace
}
}
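For context on the fixture style the updated tests use: the chained helpers seen at the call sites above (makePV(...).withPhase(...).withCapacity(...).withNodeAffinity(...), plus the analogous makePVC and makeNode builders) are test utilities in the volumebinding package and their definitions are not part of this diff. Below is a minimal, hypothetical sketch of what a PV builder of this shape could look like, assuming only the field names implied by the call sites; the in-tree helpers may differ in detail.

// Hypothetical sketch of a fluent PV builder matching the call sites above.
package volumebinding

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pvBuilder wraps a PersistentVolume so tests can populate it fluently.
type pvBuilder struct {
	PersistentVolume *v1.PersistentVolume
}

// makePV starts a builder for a PV with the given name and storage class.
func makePV(name, className string) pvBuilder {
	return pvBuilder{PersistentVolume: &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec:       v1.PersistentVolumeSpec{StorageClassName: className},
	}}
}

// withPhase sets the PV status phase (for example v1.VolumeAvailable).
func (b pvBuilder) withPhase(phase v1.PersistentVolumePhase) pvBuilder {
	b.PersistentVolume.Status.Phase = phase
	return b
}

// withCapacity sets the storage capacity reported by the PV.
func (b pvBuilder) withCapacity(capacity resource.Quantity) pvBuilder {
	b.PersistentVolume.Spec.Capacity = v1.ResourceList{v1.ResourceStorage: capacity}
	return b
}

// withNodeAffinity restricts the PV to nodes whose labels match the given values.
func (b pvBuilder) withNodeAffinity(keyValues map[string][]string) pvBuilder {
	matchExpressions := make([]v1.NodeSelectorRequirement, 0, len(keyValues))
	for key, values := range keyValues {
		matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{
			Key:      key,
			Operator: v1.NodeSelectorOpIn,
			Values:   values,
		})
	}
	b.PersistentVolume.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
		Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: matchExpressions}},
		},
	}
	return b
}

A test would then read makePV("pv-a", waitSC.Name).withPhase(v1.VolumeAvailable).PersistentVolume, which is the pattern the diff migrates the volumebinding plugin tests to.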