Merge pull request #128184 from cupnes/add-storage-capacity-scoring

KEP-4049: Add storage capacity scoring to VolumeBinding plugin
This commit is contained in:
Kubernetes Prow Robot 2025-03-18 03:49:49 -07:00 committed by GitHub
commit 83c5f99f97
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 535 additions and 178 deletions

View File

@ -39,10 +39,8 @@ import (
basecompatibility "k8s.io/component-base/compatibility" basecompatibility "k8s.io/component-base/compatibility"
componentbaseconfig "k8s.io/component-base/config" componentbaseconfig "k8s.io/component-base/config"
"k8s.io/component-base/featuregate" "k8s.io/component-base/featuregate"
featuregatetesting "k8s.io/component-base/featuregate/testing"
configv1 "k8s.io/kube-scheduler/config/v1" configv1 "k8s.io/kube-scheduler/config/v1"
"k8s.io/kubernetes/cmd/kube-scheduler/app/options" "k8s.io/kubernetes/cmd/kube-scheduler/app/options"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults" "k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
"k8s.io/kubernetes/pkg/scheduler/framework" "k8s.io/kubernetes/pkg/scheduler/framework"
@ -222,19 +220,6 @@ leaderElection:
wantErr bool wantErr bool
wantFeaturesGates map[string]bool wantFeaturesGates map[string]bool
}{ }{
{
name: "default config with an alpha feature enabled",
flags: []string{
"--kubeconfig", configKubeconfig,
"--feature-gates=VolumeCapacityPriority=true",
},
wantPlugins: map[string]*config.Plugins{
"default-scheduler": defaults.ExpandedPluginsV1,
},
restoreFeatures: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false,
},
},
{ {
name: "component configuration v1 with only scheduler name configured", name: "component configuration v1 with only scheduler name configured",
flags: []string{ flags: []string{
@ -435,9 +420,6 @@ leaderElection:
for _, tc := range testcases { for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) { t.Run(tc.name, func(t *testing.T) {
for k, v := range tc.restoreFeatures {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, k, v)
}
componentGlobalsRegistry := basecompatibility.NewComponentGlobalsRegistry() componentGlobalsRegistry := basecompatibility.NewComponentGlobalsRegistry()
verKube := basecompatibility.NewEffectiveVersionFromString("1.32", "1.31", "1.31") verKube := basecompatibility.NewEffectiveVersionFromString("1.32", "1.31", "1.31")
fg := feature.DefaultFeatureGate.DeepCopy() fg := feature.DefaultFeatureGate.DeepCopy()

View File

@ -692,6 +692,13 @@ const (
// Enables trafficDistribution field on Services. // Enables trafficDistribution field on Services.
ServiceTrafficDistribution featuregate.Feature = "ServiceTrafficDistribution" ServiceTrafficDistribution featuregate.Feature = "ServiceTrafficDistribution"
// owner: @cupnes
// kep: https://kep.k8s.io/4049
//
// Enables scoring nodes by available storage capacity with
// StorageCapacityScoring feature gate.
StorageCapacityScoring featuregate.Feature = "StorageCapacityScoring"
// owner: @gjkim42 @SergeyKanzhelev @matthyx @tzneal // owner: @gjkim42 @SergeyKanzhelev @matthyx @tzneal
// kep: http://kep.k8s.io/753 // kep: http://kep.k8s.io/753
// //
@ -798,9 +805,6 @@ const (
// Enables user specified volume attributes for persistent volumes, like iops and throughput. // Enables user specified volume attributes for persistent volumes, like iops and throughput.
VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass" VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"
// owner: @cofyc
VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority"
// owner: @ksubrmnn // owner: @ksubrmnn
// //
// Allows kube-proxy to create DSR loadbalancers for Windows // Allows kube-proxy to create DSR loadbalancers for Windows

View File

@ -766,6 +766,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33 {Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33
}, },
StorageCapacityScoring: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},
StorageVersionMigrator: { StorageVersionMigrator: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha}, {Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
}, },
@ -836,10 +840,6 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta}, {Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
}, },
VolumeCapacityPriority: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
},
WinDSR: { WinDSR: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha}, {Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
}, },

View File

@ -64065,7 +64065,7 @@ func schema_k8sio_kube_scheduler_config_v1_VolumeBindingArgs(ref common.Referenc
}, },
}, },
SchemaProps: spec.SchemaProps{ SchemaProps: spec.SchemaProps{
Description: "Shape specifies the points defining the score function shape, which is used to score nodes based on the utilization of statically provisioned PVs. The utilization is calculated by dividing the total requested storage of the pod by the total capacity of feasible PVs on each node. Each point contains utilization (ranges from 0 to 100) and its associated score (ranges from 0 to 10). You can turn the priority by specifying different scores for different utilization numbers. The default shape points are: 1) 0 for 0 utilization 2) 10 for 100 utilization All points must be sorted in increasing order by utilization.", Description: "Shape specifies the points defining the score function shape, which is used to score nodes based on the utilization of provisioned PVs. The utilization is calculated by dividing the total requested storage of the pod by the total capacity of feasible PVs on each node. Each point contains utilization (ranges from 0 to 100) and its associated score (ranges from 0 to 10). You can turn the priority by specifying different scores for different utilization numbers. The default shape points are: 1) 10 for 0 utilization 2) 0 for 100 utilization All points must be sorted in increasing order by utilization.",
Type: []string{"array"}, Type: []string{"array"},
Items: &spec.SchemaOrArray{ Items: &spec.SchemaOrArray{
Schema: &spec.Schema{ Schema: &spec.Schema{

View File

@ -163,7 +163,7 @@ type VolumeBindingArgs struct {
// 1) 10 for 0 utilization // 1) 10 for 0 utilization
// 2) 0 for 100 utilization // 2) 0 for 100 utilization
// All points must be sorted in increasing order by utilization. // All points must be sorted in increasing order by utilization.
// +featureGate=VolumeCapacityPriority // +featureGate=StorageCapacityScoring
// +optional // +optional
Shape []UtilizationShapePoint Shape []UtilizationShapePoint
} }

View File

@ -192,15 +192,15 @@ func SetDefaults_VolumeBindingArgs(obj *configv1.VolumeBindingArgs) {
if obj.BindTimeoutSeconds == nil { if obj.BindTimeoutSeconds == nil {
obj.BindTimeoutSeconds = ptr.To[int64](600) obj.BindTimeoutSeconds = ptr.To[int64](600)
} }
if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring) {
obj.Shape = []configv1.UtilizationShapePoint{ obj.Shape = []configv1.UtilizationShapePoint{
{ {
Utilization: 0, Utilization: 0,
Score: 0, Score: int32(config.MaxCustomPriorityScore),
}, },
{ {
Utilization: 100, Utilization: 100,
Score: int32(config.MaxCustomPriorityScore), Score: 0,
}, },
} }
} }

View File

@ -810,9 +810,9 @@ func TestPluginArgsDefaults(t *testing.T) {
}, },
}, },
{ {
name: "VolumeBindingArgs empty, VolumeCapacityPriority disabled", name: "VolumeBindingArgs empty, StorageCapacityScoring disabled",
features: map[featuregate.Feature]bool{ features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false, features.StorageCapacityScoring: false,
}, },
in: &configv1.VolumeBindingArgs{}, in: &configv1.VolumeBindingArgs{},
want: &configv1.VolumeBindingArgs{ want: &configv1.VolumeBindingArgs{
@ -820,16 +820,16 @@ func TestPluginArgsDefaults(t *testing.T) {
}, },
}, },
{ {
name: "VolumeBindingArgs empty, VolumeCapacityPriority enabled", name: "VolumeBindingArgs empty, StorageCapacityScoring enabled",
features: map[featuregate.Feature]bool{ features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true, features.StorageCapacityScoring: true,
}, },
in: &configv1.VolumeBindingArgs{}, in: &configv1.VolumeBindingArgs{},
want: &configv1.VolumeBindingArgs{ want: &configv1.VolumeBindingArgs{
BindTimeoutSeconds: ptr.To[int64](600), BindTimeoutSeconds: ptr.To[int64](600),
Shape: []configv1.UtilizationShapePoint{ Shape: []configv1.UtilizationShapePoint{
{Utilization: 0, Score: 0}, {Utilization: 0, Score: 10},
{Utilization: 100, Score: 10}, {Utilization: 100, Score: 0},
}, },
}, },
}, },

View File

@ -261,13 +261,13 @@ func ValidateNodeAffinityArgs(path *field.Path, args *config.NodeAffinityArgs) e
// VolumeBindingArgsValidationOptions contains the different settings for validation. // VolumeBindingArgsValidationOptions contains the different settings for validation.
type VolumeBindingArgsValidationOptions struct { type VolumeBindingArgsValidationOptions struct {
AllowVolumeCapacityPriority bool AllowStorageCapacityScoring bool
} }
// ValidateVolumeBindingArgs validates that VolumeBindingArgs are set correctly. // ValidateVolumeBindingArgs validates that VolumeBindingArgs are set correctly.
func ValidateVolumeBindingArgs(path *field.Path, args *config.VolumeBindingArgs) error { func ValidateVolumeBindingArgs(path *field.Path, args *config.VolumeBindingArgs) error {
return ValidateVolumeBindingArgsWithOptions(path, args, VolumeBindingArgsValidationOptions{ return ValidateVolumeBindingArgsWithOptions(path, args, VolumeBindingArgsValidationOptions{
AllowVolumeCapacityPriority: utilfeature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority), AllowStorageCapacityScoring: utilfeature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring),
}) })
} }
@ -279,13 +279,13 @@ func ValidateVolumeBindingArgsWithOptions(path *field.Path, args *config.VolumeB
allErrs = append(allErrs, field.Invalid(path.Child("bindTimeoutSeconds"), args.BindTimeoutSeconds, "invalid BindTimeoutSeconds, should not be a negative value")) allErrs = append(allErrs, field.Invalid(path.Child("bindTimeoutSeconds"), args.BindTimeoutSeconds, "invalid BindTimeoutSeconds, should not be a negative value"))
} }
if opts.AllowVolumeCapacityPriority { if opts.AllowStorageCapacityScoring {
allErrs = append(allErrs, validateFunctionShape(args.Shape, path.Child("shape"))...) allErrs = append(allErrs, validateFunctionShape(args.Shape, path.Child("shape"))...)
} else if args.Shape != nil { } else if args.Shape != nil {
// When the feature is off, return an error if the config is not nil. // When the feature is off, return an error if the config is not nil.
// This prevents unexpected configuration from taking effect when the // This prevents unexpected configuration from taking effect when the
// feature turns on in the future. // feature turns on in the future.
allErrs = append(allErrs, field.Invalid(path.Child("shape"), args.Shape, "unexpected field `shape`, remove it or turn on the feature gate VolumeCapacityPriority")) allErrs = append(allErrs, field.Invalid(path.Child("shape"), args.Shape, "unexpected field `shape`, remove it or turn on the feature gate StorageCapacityScoring"))
} }
return allErrs.ToAggregate() return allErrs.ToAggregate()
} }

View File

@ -559,9 +559,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}), }}),
}, },
{ {
name: "[VolumeCapacityPriority=off] shape should be nil when the feature is off", name: "[StorageCapacityScoring=off] shape should be nil when the feature is off",
features: map[featuregate.Feature]bool{ features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false, features.StorageCapacityScoring: false,
}, },
args: config.VolumeBindingArgs{ args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10, BindTimeoutSeconds: 10,
@ -569,9 +569,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}, },
}, },
{ {
name: "[VolumeCapacityPriority=off] error if the shape is not nil when the feature is off", name: "[StorageCapacityScoring=off] error if the shape is not nil when the feature is off",
features: map[featuregate.Feature]bool{ features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false, features.StorageCapacityScoring: false,
}, },
args: config.VolumeBindingArgs{ args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10, BindTimeoutSeconds: 10,
@ -586,9 +586,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}), }}),
}, },
{ {
name: "[VolumeCapacityPriority=on] shape should not be empty", name: "[StorageCapacityScoring=on] shape should not be empty",
features: map[featuregate.Feature]bool{ features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true, features.StorageCapacityScoring: true,
}, },
args: config.VolumeBindingArgs{ args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10, BindTimeoutSeconds: 10,
@ -600,9 +600,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}), }}),
}, },
{ {
name: "[VolumeCapacityPriority=on] shape points must be sorted in increasing order", name: "[StorageCapacityScoring=on] shape points must be sorted in increasing order",
features: map[featuregate.Feature]bool{ features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true, features.StorageCapacityScoring: true,
}, },
args: config.VolumeBindingArgs{ args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10, BindTimeoutSeconds: 10,
@ -618,9 +618,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}), }}),
}, },
{ {
name: "[VolumeCapacityPriority=on] shape point: invalid utilization and score", name: "[StorageCapacityScoring=on] shape point: invalid utilization and score",
features: map[featuregate.Feature]bool{ features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true, features.StorageCapacityScoring: true,
}, },
args: config.VolumeBindingArgs{ args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10, BindTimeoutSeconds: 10,

View File

@ -23,7 +23,6 @@ type Features struct {
EnableDRAPrioritizedList bool EnableDRAPrioritizedList bool
EnableDRAAdminAccess bool EnableDRAAdminAccess bool
EnableDynamicResourceAllocation bool EnableDynamicResourceAllocation bool
EnableVolumeCapacityPriority bool
EnableVolumeAttributesClass bool EnableVolumeAttributesClass bool
EnableCSIMigrationPortworx bool EnableCSIMigrationPortworx bool
EnableNodeInclusionPolicyInPodTopologySpread bool EnableNodeInclusionPolicyInPodTopologySpread bool
@ -33,4 +32,5 @@ type Features struct {
EnableSchedulingQueueHint bool EnableSchedulingQueueHint bool
EnableAsyncPreemption bool EnableAsyncPreemption bool
EnablePodLevelResources bool EnablePodLevelResources bool
EnableStorageCapacityScoring bool
} }

View File

@ -49,7 +49,6 @@ func NewInTreeRegistry() runtime.Registry {
EnableDRAPrioritizedList: feature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList), EnableDRAPrioritizedList: feature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList),
EnableDRAAdminAccess: feature.DefaultFeatureGate.Enabled(features.DRAAdminAccess), EnableDRAAdminAccess: feature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
EnableDynamicResourceAllocation: feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation), EnableDynamicResourceAllocation: feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation),
EnableVolumeCapacityPriority: feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
EnableVolumeAttributesClass: feature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass), EnableVolumeAttributesClass: feature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass),
EnableCSIMigrationPortworx: feature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx), EnableCSIMigrationPortworx: feature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx),
EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread), EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
@ -59,6 +58,7 @@ func NewInTreeRegistry() runtime.Registry {
EnableSchedulingQueueHint: feature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints), EnableSchedulingQueueHint: feature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints),
EnableAsyncPreemption: feature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption), EnableAsyncPreemption: feature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption),
EnablePodLevelResources: feature.DefaultFeatureGate.Enabled(features.PodLevelResources), EnablePodLevelResources: feature.DefaultFeatureGate.Enabled(features.PodLevelResources),
EnableStorageCapacityScoring: feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring),
} }
registry := runtime.Registry{ registry := runtime.Registry{

View File

@ -102,13 +102,19 @@ func (b *BindingInfo) StorageResource() *StorageResource {
} }
} }
// DynamicProvision represents a dynamically provisioned volume.
type DynamicProvision struct {
	// PVC is the claim that requires dynamic provisioning.
	PVC *v1.PersistentVolumeClaim
	// NodeCapacity is the CSIStorageCapacity object that satisfied the
	// claim's request on the selected node. It is nil when capacity
	// checking was skipped (e.g. the claim has no storage request, or the
	// CSI driver does not opt into storage capacity tracking).
	NodeCapacity *storagev1.CSIStorageCapacity
}
// PodVolumes holds pod's volumes information used in volume scheduling. // PodVolumes holds pod's volumes information used in volume scheduling.
type PodVolumes struct { type PodVolumes struct {
// StaticBindings are binding decisions for PVCs which can be bound to // StaticBindings are binding decisions for PVCs which can be bound to
// pre-provisioned static PVs. // pre-provisioned static PVs.
StaticBindings []*BindingInfo StaticBindings []*BindingInfo
// DynamicProvisions are PVCs that require dynamic provisioning // DynamicProvisions are PVCs that require dynamic provisioning
DynamicProvisions []*v1.PersistentVolumeClaim DynamicProvisions []*DynamicProvision
} }
// InTreeToCSITranslator contains methods required to check migratable status // InTreeToCSITranslator contains methods required to check migratable status
@ -310,7 +316,7 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
var ( var (
staticBindings []*BindingInfo staticBindings []*BindingInfo
dynamicProvisions []*v1.PersistentVolumeClaim dynamicProvisions []*DynamicProvision
) )
defer func() { defer func() {
// Although we do not distinguish nil from empty in this function, for // Although we do not distinguish nil from empty in this function, for
@ -377,6 +383,16 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
return return
} }
// convertDynamicProvisionsToPVCs extracts the PVC from each DynamicProvision
// and returns them as a slice of *v1.PersistentVolumeClaim, preserving order.
func convertDynamicProvisionsToPVCs(dynamicProvisions []*DynamicProvision) []*v1.PersistentVolumeClaim {
	pvcs := make([]*v1.PersistentVolumeClaim, len(dynamicProvisions))
	for i, provision := range dynamicProvisions {
		pvcs[i] = provision.PVC
	}
	return pvcs
}
// AssumePodVolumes will take the matching PVs and PVCs to provision in pod's // AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
// volume information for the chosen node, and: // volume information for the chosen node, and:
// 1. Update the pvCache with the new prebound PV. // 1. Update the pvCache with the new prebound PV.
@ -423,20 +439,21 @@ func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod,
} }
// Assume PVCs // Assume PVCs
newProvisionedPVCs := []*v1.PersistentVolumeClaim{} newProvisionedPVCs := []*DynamicProvision{}
for _, claim := range podVolumes.DynamicProvisions { for _, dynamicProvision := range podVolumes.DynamicProvisions {
// The claims from method args can be pointing to watcher cache. We must not // The claims from method args can be pointing to watcher cache. We must not
// modify these, therefore create a copy. // modify these, therefore create a copy.
claimClone := claim.DeepCopy() claimClone := dynamicProvision.PVC.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, volume.AnnSelectedNode, nodeName) metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, volume.AnnSelectedNode, nodeName)
err = b.pvcCache.Assume(claimClone) err = b.pvcCache.Assume(claimClone)
if err != nil { if err != nil {
pvcs := convertDynamicProvisionsToPVCs(newProvisionedPVCs)
b.revertAssumedPVs(newBindings) b.revertAssumedPVs(newBindings)
b.revertAssumedPVCs(newProvisionedPVCs) b.revertAssumedPVCs(pvcs)
return return
} }
newProvisionedPVCs = append(newProvisionedPVCs, claimClone) newProvisionedPVCs = append(newProvisionedPVCs, &DynamicProvision{PVC: claimClone})
} }
podVolumes.StaticBindings = newBindings podVolumes.StaticBindings = newBindings
@ -446,8 +463,9 @@ func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod,
// RevertAssumedPodVolumes will revert assumed PV and PVC cache. // RevertAssumedPodVolumes will revert assumed PV and PVC cache.
func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) { func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) {
pvcs := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)
b.revertAssumedPVs(podVolumes.StaticBindings) b.revertAssumedPVs(podVolumes.StaticBindings)
b.revertAssumedPVCs(podVolumes.DynamicProvisions) b.revertAssumedPVCs(pvcs)
} }
// BindPodVolumes gets the cached bindings and PVCs to provision in pod's volumes information, // BindPodVolumes gets the cached bindings and PVCs to provision in pod's volumes information,
@ -464,7 +482,7 @@ func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, p
}() }()
bindings := podVolumes.StaticBindings bindings := podVolumes.StaticBindings
claimsToProvision := podVolumes.DynamicProvisions claimsToProvision := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)
// Start API operations // Start API operations
err = b.bindAPIUpdate(ctx, assumedPod, bindings, claimsToProvision) err = b.bindAPIUpdate(ctx, assumedPod, bindings, claimsToProvision)
@ -886,8 +904,8 @@ func (b *volumeBinder) findMatchingVolumes(logger klog.Logger, pod *v1.Pod, clai
// checkVolumeProvisions checks given unbound claims (the claims have gone through func // checkVolumeProvisions checks given unbound claims (the claims have gone through func
// findMatchingVolumes, and do not have matching volumes for binding), and return true // findMatchingVolumes, and do not have matching volumes for binding), and return true
// if all of the claims are eligible for dynamic provision. // if all of the claims are eligible for dynamic provision.
func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) { func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*DynamicProvision, err error) {
dynamicProvisions = []*v1.PersistentVolumeClaim{} dynamicProvisions = []*DynamicProvision{}
// We return early with provisionedClaims == nil if a check // We return early with provisionedClaims == nil if a check
// fails or we encounter an error. // fails or we encounter an error.
@ -915,7 +933,7 @@ func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, cl
} }
// Check storage capacity. // Check storage capacity.
sufficient, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node) sufficient, capacity, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node)
if err != nil { if err != nil {
return false, false, nil, err return false, false, nil, err
} }
@ -924,8 +942,10 @@ func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, cl
return true, false, nil, nil return true, false, nil, nil
} }
dynamicProvisions = append(dynamicProvisions, claim) dynamicProvisions = append(dynamicProvisions, &DynamicProvision{
PVC: claim,
NodeCapacity: capacity,
})
} }
logger.V(4).Info("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node)) logger.V(4).Info("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))
@ -945,12 +965,12 @@ func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
} }
// hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size // hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size
// that is available from the node. // that is available from the node. This function returns the node capacity based on the PVC's storage class.
func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, error) { func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, *storagev1.CSIStorageCapacity, error) {
quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage] quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage]
if !ok { if !ok {
// No capacity to check for. // No capacity to check for.
return true, nil return true, nil, nil
} }
// Only enabled for CSI drivers which opt into it. // Only enabled for CSI drivers which opt into it.
@ -960,19 +980,19 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
// Either the provisioner is not a CSI driver or the driver does not // Either the provisioner is not a CSI driver or the driver does not
// opt into storage capacity scheduling. Either way, skip // opt into storage capacity scheduling. Either way, skip
// capacity checking. // capacity checking.
return true, nil return true, nil, nil
} }
return false, err return false, nil, err
} }
if driver.Spec.StorageCapacity == nil || !*driver.Spec.StorageCapacity { if driver.Spec.StorageCapacity == nil || !*driver.Spec.StorageCapacity {
return true, nil return true, nil, nil
} }
// Look for a matching CSIStorageCapacity object(s). // Look for a matching CSIStorageCapacity object(s).
// TODO (for beta): benchmark this and potentially introduce some kind of lookup structure (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-654356718). // TODO (for beta): benchmark this and potentially introduce some kind of lookup structure (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-654356718).
capacities, err := b.csiStorageCapacityLister.List(labels.Everything()) capacities, err := b.csiStorageCapacityLister.List(labels.Everything())
if err != nil { if err != nil {
return false, err return false, nil, err
} }
sizeInBytes := quantity.Value() sizeInBytes := quantity.Value()
@ -981,7 +1001,7 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
capacitySufficient(capacity, sizeInBytes) && capacitySufficient(capacity, sizeInBytes) &&
b.nodeHasAccess(logger, node, capacity) { b.nodeHasAccess(logger, node, capacity) {
// Enough capacity found. // Enough capacity found.
return true, nil return true, capacity, nil
} }
} }
@ -989,7 +1009,7 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
// they had to be rejected. Log that above? But that might be a lot of log output... // they had to be rejected. Log that above? But that might be a lot of log output...
logger.V(4).Info("Node has no accessible CSIStorageCapacity with enough capacity for PVC", logger.V(4).Info("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
"node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass)) "node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
return false, nil return false, nil, nil
} }
func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool { func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {

View File

@ -122,6 +122,11 @@ var (
// node topology for CSI migration // node topology for CSI migration
zone1Labels = map[string]string{v1.LabelFailureDomainBetaZone: "us-east-1", v1.LabelFailureDomainBetaRegion: "us-east-1a"} zone1Labels = map[string]string{v1.LabelFailureDomainBetaZone: "us-east-1", v1.LabelFailureDomainBetaRegion: "us-east-1a"}
// csiCapacity objects
networkAttachedCapacity = makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", "")
node1Capacity = makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", "")
node2Capacity = makeCapacity("net", waitClassWithProvisioner, node2, "1Gi", "")
) )
type testEnv struct { type testEnv struct {
@ -396,14 +401,14 @@ func (env *testEnv) assumeVolumes(t *testing.T, node string, pod *v1.Pod, bindin
} }
} }
func (env *testEnv) validatePodCache(t *testing.T, node string, pod *v1.Pod, podVolumes *PodVolumes, expectedBindings []*BindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) { func (env *testEnv) validatePodCache(t *testing.T, node string, pod *v1.Pod, podVolumes *PodVolumes, expectedBindings []*BindingInfo, expectedProvisionings []*DynamicProvision) {
var ( var (
bindings []*BindingInfo bindings []*BindingInfo
provisionedClaims []*v1.PersistentVolumeClaim dynamicProvisions []*DynamicProvision
) )
if podVolumes != nil { if podVolumes != nil {
bindings = podVolumes.StaticBindings bindings = podVolumes.StaticBindings
provisionedClaims = podVolumes.DynamicProvisions dynamicProvisions = podVolumes.DynamicProvisions
} }
if aLen, eLen := len(bindings), len(expectedBindings); aLen != eLen { if aLen, eLen := len(bindings), len(expectedBindings); aLen != eLen {
t.Errorf("expected %v bindings, got %v", eLen, aLen) t.Errorf("expected %v bindings, got %v", eLen, aLen)
@ -427,17 +432,17 @@ func (env *testEnv) validatePodCache(t *testing.T, node string, pod *v1.Pod, pod
} }
} }
if aLen, eLen := len(provisionedClaims), len(expectedProvisionings); aLen != eLen { if aLen, eLen := len(dynamicProvisions), len(expectedProvisionings); aLen != eLen {
t.Errorf("expected %v provisioned claims, got %v", eLen, aLen) t.Errorf("expected %v provisioned claims, got %v", eLen, aLen)
} else if expectedProvisionings == nil && provisionedClaims != nil { } else if expectedProvisionings == nil && dynamicProvisions != nil {
// nil and empty are different // nil and empty are different
t.Error("expected nil provisionings, got empty") t.Error("expected nil provisionings, got empty")
} else if expectedProvisionings != nil && provisionedClaims == nil { } else if expectedProvisionings != nil && dynamicProvisions == nil {
// nil and empty are different // nil and empty are different
t.Error("expected empty provisionings, got nil") t.Error("expected empty provisionings, got nil")
} else { } else {
for i := 0; i < aLen; i++ { for i := 0; i < aLen; i++ {
if diff := cmp.Diff(expectedProvisionings[i], provisionedClaims[i]); diff != "" { if diff := cmp.Diff(expectedProvisionings[i], dynamicProvisions[i]); diff != "" {
t.Errorf("provisioned claims doesn't match (-want, +got):\n%s", diff) t.Errorf("provisioned claims doesn't match (-want, +got):\n%s", diff)
} }
} }
@ -1041,7 +1046,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
// Expected podBindingCache fields // Expected podBindingCache fields
expectedBindings []*BindingInfo expectedBindings []*BindingInfo
expectedProvisions []*v1.PersistentVolumeClaim expectedProvisions []*DynamicProvision
// Expected return values // Expected return values
reasons ConflictReasons reasons ConflictReasons
@ -1051,26 +1056,26 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
scenarios := map[string]scenarioType{ scenarios := map[string]scenarioType{
"one-provisioned": { "one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true, needsCapacity: true,
}, },
"two-unbound-pvcs,one-matched,one-provisioned": { "two-unbound-pvcs,one-matched,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvNode1a}, pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)}, expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true, needsCapacity: true,
}, },
"one-bound,one-provisioned": { "one-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBound}, pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true, needsCapacity: true,
}, },
"one-binding,one-selected-node": { "one-binding,one-selected-node": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, selectedNodePVC}, podPVCs: []*v1.PersistentVolumeClaim{boundPVC, selectedNodePVC},
pvs: []*v1.PersistentVolume{pvBound}, pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{selectedNodePVC}, expectedProvisions: []*DynamicProvision{{PVC: selectedNodePVC}},
needsCapacity: true, needsCapacity: true,
}, },
"immediate-unbound-pvc": { "immediate-unbound-pvc": {
@ -1080,7 +1085,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
"one-immediate-bound,one-provisioned": { "one-immediate-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediate}, pvs: []*v1.PersistentVolume{pvBoundImmediate},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC}, expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true, needsCapacity: true,
}, },
"invalid-provisioner": { "invalid-provisioner": {
@ -1266,17 +1271,17 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
func TestAssumePodVolumes(t *testing.T) { func TestAssumePodVolumes(t *testing.T) {
type scenarioType struct { type scenarioType struct {
// Inputs // Inputs
podPVCs []*v1.PersistentVolumeClaim podPVCs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume pvs []*v1.PersistentVolume
bindings []*BindingInfo bindings []*BindingInfo
provisionedPVCs []*v1.PersistentVolumeClaim dynamicProvisions []*DynamicProvision
// Expected return values // Expected return values
shouldFail bool shouldFail bool
expectedAllBound bool expectedAllBound bool
expectedBindings []*BindingInfo expectedBindings []*BindingInfo
expectedProvisionings []*v1.PersistentVolumeClaim expectedProvisionings []*DynamicProvision
} }
scenarios := map[string]scenarioType{ scenarios := map[string]scenarioType{
"all-bound": { "all-bound": {
@ -1289,21 +1294,21 @@ func TestAssumePodVolumes(t *testing.T) {
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)}, bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
pvs: []*v1.PersistentVolume{pvNode1a}, pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)}, expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{}, expectedProvisionings: []*DynamicProvision{},
}, },
"two-bindings": { "two-bindings": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)}, bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b}, pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound), makeBinding(unboundPVC2, pvNode1bBound)}, expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound), makeBinding(unboundPVC2, pvNode1bBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{}, expectedProvisionings: []*DynamicProvision{},
}, },
"pv-already-bound": { "pv-already-bound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)}, bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
pvs: []*v1.PersistentVolume{pvNode1aBound}, pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)}, expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{}, expectedProvisionings: []*DynamicProvision{},
}, },
"tmpupdate-failed": { "tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
@ -1315,16 +1320,16 @@ func TestAssumePodVolumes(t *testing.T) {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)}, bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
pvs: []*v1.PersistentVolume{pvNode1a}, pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC}, dynamicProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)}, expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{selectedNodePVC}, expectedProvisionings: []*DynamicProvision{{PVC: selectedNodePVC}},
}, },
"one-binding, one-provision-tmpupdate-failed": { "one-binding, one-provision-tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion}, podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)}, bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
pvs: []*v1.PersistentVolume{pvNode1a}, pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2}, dynamicProvisions: []*DynamicProvision{{PVC: provisionedPVC2}},
shouldFail: true, shouldFail: true,
}, },
} }
@ -1342,7 +1347,7 @@ func TestAssumePodVolumes(t *testing.T) {
withPVCSVolume(scenario.podPVCs).Pod withPVCSVolume(scenario.podPVCs).Pod
podVolumes := &PodVolumes{ podVolumes := &PodVolumes{
StaticBindings: scenario.bindings, StaticBindings: scenario.bindings,
DynamicProvisions: scenario.provisionedPVCs, DynamicProvisions: scenario.dynamicProvisions,
} }
testEnv.initVolumes(scenario.pvs, scenario.pvs) testEnv.initVolumes(scenario.pvs, scenario.pvs)
@ -1363,12 +1368,14 @@ func TestAssumePodVolumes(t *testing.T) {
scenario.expectedBindings = scenario.bindings scenario.expectedBindings = scenario.bindings
} }
if scenario.expectedProvisionings == nil { if scenario.expectedProvisionings == nil {
scenario.expectedProvisionings = scenario.provisionedPVCs scenario.expectedProvisionings = scenario.dynamicProvisions
} }
if scenario.shouldFail { if scenario.shouldFail {
testEnv.validateCacheRestored(t, pod, scenario.bindings, scenario.provisionedPVCs) pvcs := convertDynamicProvisionsToPVCs(scenario.dynamicProvisions)
testEnv.validateCacheRestored(t, pod, scenario.bindings, pvcs)
} else { } else {
testEnv.validateAssume(t, pod, scenario.expectedBindings, scenario.expectedProvisionings) pvcs := convertDynamicProvisionsToPVCs(scenario.expectedProvisionings)
testEnv.validateAssume(t, pod, scenario.expectedBindings, pvcs)
} }
testEnv.validatePodCache(t, pod.Spec.NodeName, pod, podVolumes, scenario.expectedBindings, scenario.expectedProvisionings) testEnv.validatePodCache(t, pod.Spec.NodeName, pod, podVolumes, scenario.expectedBindings, scenario.expectedProvisionings)
} }
@ -1386,7 +1393,7 @@ func TestRevertAssumedPodVolumes(t *testing.T) {
podPVCs := []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC} podPVCs := []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC}
bindings := []*BindingInfo{makeBinding(unboundPVC, pvNode1a)} bindings := []*BindingInfo{makeBinding(unboundPVC, pvNode1a)}
pvs := []*v1.PersistentVolume{pvNode1a} pvs := []*v1.PersistentVolume{pvNode1a}
provisionedPVCs := []*v1.PersistentVolumeClaim{provisionedPVC} dynamicProvisions := []*DynamicProvision{{PVC: provisionedPVC}}
expectedBindings := []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)} expectedBindings := []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)}
expectedProvisionings := []*v1.PersistentVolumeClaim{selectedNodePVC} expectedProvisionings := []*v1.PersistentVolumeClaim{selectedNodePVC}
@ -1399,7 +1406,7 @@ func TestRevertAssumedPodVolumes(t *testing.T) {
withPVCSVolume(podPVCs).Pod withPVCSVolume(podPVCs).Pod
podVolumes := &PodVolumes{ podVolumes := &PodVolumes{
StaticBindings: bindings, StaticBindings: bindings,
DynamicProvisions: provisionedPVCs, DynamicProvisions: dynamicProvisions,
} }
testEnv.initVolumes(pvs, pvs) testEnv.initVolumes(pvs, pvs)
@ -1409,8 +1416,9 @@ func TestRevertAssumedPodVolumes(t *testing.T) {
} }
testEnv.validateAssume(t, pod, expectedBindings, expectedProvisionings) testEnv.validateAssume(t, pod, expectedBindings, expectedProvisionings)
claims := convertDynamicProvisionsToPVCs(dynamicProvisions)
testEnv.binder.RevertAssumedPodVolumes(podVolumes) testEnv.binder.RevertAssumedPodVolumes(podVolumes)
testEnv.validateCacheRestored(t, pod, bindings, provisionedPVCs) testEnv.validateCacheRestored(t, pod, bindings, claims)
} }
func TestBindAPIUpdate(t *testing.T) { func TestBindAPIUpdate(t *testing.T) {
@ -2075,9 +2083,13 @@ func TestBindPodVolumes(t *testing.T) {
} }
// Execute // Execute
dynamicProvisions := []*DynamicProvision{}
for _, claim := range claimsToProvision {
dynamicProvisions = append(dynamicProvisions, &DynamicProvision{PVC: claim})
}
podVolumes := &PodVolumes{ podVolumes := &PodVolumes{
StaticBindings: bindings, StaticBindings: bindings,
DynamicProvisions: claimsToProvision, DynamicProvisions: dynamicProvisions,
} }
err := testEnv.binder.BindPodVolumes(ctx, pod, podVolumes) err := testEnv.binder.BindPodVolumes(ctx, pod, podVolumes)
@ -2173,29 +2185,42 @@ func TestCapacity(t *testing.T) {
capacities []*storagev1.CSIStorageCapacity capacities []*storagev1.CSIStorageCapacity
// Expected return values // Expected return values
reasons ConflictReasons expectedProvisions []*DynamicProvision
shouldFail bool reasons ConflictReasons
shouldFail bool
} }
scenarios := map[string]scenarioType{ scenarios := map[string]scenarioType{
"network-attached": { "network-attached": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC}, pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
capacities: []*storagev1.CSIStorageCapacity{ capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", ""), networkAttachedCapacity,
}, },
expectedProvisions: []*DynamicProvision{{
PVC: provisionedPVC,
NodeCapacity: networkAttachedCapacity,
}},
}, },
"local-storage": { "local-storage": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC}, pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
capacities: []*storagev1.CSIStorageCapacity{ capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", ""), node1Capacity,
}, },
expectedProvisions: []*DynamicProvision{{
PVC: provisionedPVC,
NodeCapacity: node1Capacity,
}},
}, },
"multiple": { "multiple": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC}, pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
capacities: []*storagev1.CSIStorageCapacity{ capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", ""), networkAttachedCapacity,
makeCapacity("net", waitClassWithProvisioner, node2, "1Gi", ""), node2Capacity,
makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", ""), node1Capacity,
}, },
expectedProvisions: []*DynamicProvision{{
PVC: provisionedPVC,
NodeCapacity: node1Capacity,
}},
}, },
"no-storage": { "no-storage": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC}, pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
@ -2297,11 +2322,16 @@ func TestCapacity(t *testing.T) {
t.Error("returned success but expected error") t.Error("returned success but expected error")
} }
checkReasons(t, reasons, expectedReasons) checkReasons(t, reasons, expectedReasons)
provisions := scenario.pvcs expectedProvisions := scenario.expectedProvisions
if len(reasons) > 0 { if !optIn {
provisions = nil for i := 0; i < len(expectedProvisions); i++ {
expectedProvisions[i].NodeCapacity = nil
}
} }
testEnv.validatePodCache(t, pod.Spec.NodeName, pod, podVolumes, nil, provisions) if len(scenario.reasons) > 0 {
expectedProvisions = podVolumes.DynamicProvisions
}
testEnv.validatePodCache(t, pod.Spec.NodeName, pod, podVolumes, nil, expectedProvisions)
} }
yesNo := []bool{true, false} yesNo := []bool{true, false}

View File

@ -29,6 +29,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
corelisters "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/component-helpers/storage/ephemeral" "k8s.io/component-helpers/storage/ephemeral"
"k8s.io/klog/v2" "k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/apis/config"
@ -70,10 +71,11 @@ func (d *stateData) Clone() framework.StateData {
// In the Filter phase, pod binding cache is created for the pod and used in // In the Filter phase, pod binding cache is created for the pod and used in
// Reserve and PreBind phases. // Reserve and PreBind phases.
type VolumeBinding struct { type VolumeBinding struct {
Binder SchedulerVolumeBinder Binder SchedulerVolumeBinder
PVCLister corelisters.PersistentVolumeClaimLister PVCLister corelisters.PersistentVolumeClaimLister
scorer volumeCapacityScorer classLister storagelisters.StorageClassLister
fts feature.Features scorer volumeCapacityScorer
fts feature.Features
} }
var _ framework.PreFilterPlugin = &VolumeBinding{} var _ framework.PreFilterPlugin = &VolumeBinding{}
@ -451,7 +453,7 @@ func (pl *VolumeBinding) PreScore(ctx context.Context, cs *framework.CycleState,
if err != nil { if err != nil {
return framework.AsStatus(err) return framework.AsStatus(err)
} }
if state.hasStaticBindings { if state.hasStaticBindings || pl.fts.EnableStorageCapacityScoring {
return nil return nil
} }
return framework.NewStatus(framework.Skip) return framework.NewStatus(framework.Skip)
@ -471,20 +473,44 @@ func (pl *VolumeBinding) Score(ctx context.Context, cs *framework.CycleState, po
if !ok { if !ok {
return 0, nil return 0, nil
} }
// group by storage class
classResources := make(classResourceMap) classResources := make(classResourceMap)
for _, staticBinding := range podVolumes.StaticBindings { if len(podVolumes.StaticBindings) != 0 || !pl.fts.EnableStorageCapacityScoring {
class := staticBinding.StorageClassName() // group static binding volumes by storage class
storageResource := staticBinding.StorageResource() for _, staticBinding := range podVolumes.StaticBindings {
if _, ok := classResources[class]; !ok { class := staticBinding.StorageClassName()
classResources[class] = &StorageResource{ storageResource := staticBinding.StorageResource()
Requested: 0, if _, ok := classResources[class]; !ok {
Capacity: 0, classResources[class] = &StorageResource{
Requested: 0,
Capacity: 0,
}
} }
classResources[class].Requested += storageResource.Requested
classResources[class].Capacity += storageResource.Capacity
}
} else {
// group dynamic binding volumes by storage class
for _, provision := range podVolumes.DynamicProvisions {
if provision.NodeCapacity == nil {
continue
}
class := *provision.PVC.Spec.StorageClassName
if _, ok := classResources[class]; !ok {
classResources[class] = &StorageResource{
Requested: 0,
Capacity: 0,
}
}
// The following line cannot be +=. For example, if a Pod requests two 50GB volumes from
// a StorageClass with 100GB of capacity on a node, this part of the code will be executed twice.
// In that case, using += would incorrectly set classResources[class].Capacity to 200GB.
classResources[class].Capacity = provision.NodeCapacity.Capacity.Value()
requestedQty := provision.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
classResources[class].Requested += requestedQty.Value()
} }
classResources[class].Requested += storageResource.Requested
classResources[class].Capacity += storageResource.Capacity
} }
return pl.scorer(classResources), nil return pl.scorer(classResources), nil
} }
@ -566,7 +592,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
return nil, fmt.Errorf("want args to be of type VolumeBindingArgs, got %T", plArgs) return nil, fmt.Errorf("want args to be of type VolumeBindingArgs, got %T", plArgs)
} }
if err := validation.ValidateVolumeBindingArgsWithOptions(nil, args, validation.VolumeBindingArgsValidationOptions{ if err := validation.ValidateVolumeBindingArgsWithOptions(nil, args, validation.VolumeBindingArgsValidationOptions{
AllowVolumeCapacityPriority: fts.EnableVolumeCapacityPriority, AllowStorageCapacityScoring: fts.EnableStorageCapacityScoring,
}); err != nil { }); err != nil {
return nil, err return nil, err
} }
@ -584,7 +610,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
// build score function // build score function
var scorer volumeCapacityScorer var scorer volumeCapacityScorer
if fts.EnableVolumeCapacityPriority { if fts.EnableStorageCapacityScoring {
shape := make(helper.FunctionShape, 0, len(args.Shape)) shape := make(helper.FunctionShape, 0, len(args.Shape))
for _, point := range args.Shape { for _, point := range args.Shape {
shape = append(shape, helper.FunctionShapePoint{ shape = append(shape, helper.FunctionShapePoint{
@ -595,9 +621,10 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
scorer = buildScorerFunction(shape) scorer = buildScorerFunction(shape)
} }
return &VolumeBinding{ return &VolumeBinding{
Binder: binder, Binder: binder,
PVCLister: pvcInformer.Lister(), PVCLister: pvcInformer.Lister(),
scorer: scorer, classLister: storageClassInformer.Lister(),
fts: fts, scorer: scorer,
fts: fts,
}, nil }, nil
} }

View File

@ -59,6 +59,22 @@ var (
}, },
VolumeBindingMode: &waitForFirstConsumer, VolumeBindingMode: &waitForFirstConsumer,
} }
waitSCWithStorageCapacity = &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "wait-sc-with-storage-capacity",
},
Provisioner: "driver-with-storage-capacity",
VolumeBindingMode: &waitForFirstConsumer,
}
driverWithStorageCapacity = &storagev1.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "driver-with-storage-capacity",
},
Spec: storagev1.CSIDriverSpec{
StorageCapacity: ptr.To(true),
},
}
defaultShapePoint = []config.UtilizationShapePoint{ defaultShapePoint = []config.UtilizationShapePoint{
{ {
@ -79,6 +95,7 @@ func TestVolumeBinding(t *testing.T) {
nodes []*v1.Node nodes []*v1.Node
pvcs []*v1.PersistentVolumeClaim pvcs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume pvs []*v1.PersistentVolume
capacities []*storagev1.CSIStorageCapacity
fts feature.Features fts feature.Features
args *config.VolumeBindingArgs args *config.VolumeBindingArgs
wantPreFilterResult *framework.PreFilterResult wantPreFilterResult *framework.PreFilterResult
@ -310,7 +327,7 @@ func TestVolumeBinding(t *testing.T) {
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume, withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
}, },
fts: feature.Features{ fts: feature.Features{
EnableVolumeCapacityPriority: true, EnableStorageCapacityScoring: true,
}, },
wantPreFilterStatus: nil, wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{ wantStateAfterPreFilter: &stateData{
@ -400,7 +417,7 @@ func TestVolumeBinding(t *testing.T) {
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume, withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
}, },
fts: feature.Features{ fts: feature.Features{
EnableVolumeCapacityPriority: true, EnableStorageCapacityScoring: true,
}, },
wantPreFilterStatus: nil, wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{ wantStateAfterPreFilter: &stateData{
@ -519,7 +536,7 @@ func TestVolumeBinding(t *testing.T) {
}).PersistentVolume, }).PersistentVolume,
}, },
fts: feature.Features{ fts: feature.Features{
EnableVolumeCapacityPriority: true, EnableStorageCapacityScoring: true,
}, },
wantPreFilterStatus: nil, wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{ wantStateAfterPreFilter: &stateData{
@ -637,7 +654,7 @@ func TestVolumeBinding(t *testing.T) {
}).PersistentVolume, }).PersistentVolume,
}, },
fts: feature.Features{ fts: feature.Features{
EnableVolumeCapacityPriority: true, EnableStorageCapacityScoring: true,
}, },
args: &config.VolumeBindingArgs{ args: &config.VolumeBindingArgs{
BindTimeoutSeconds: 300, BindTimeoutSeconds: 300,
@ -716,6 +733,250 @@ func TestVolumeBinding(t *testing.T) {
0, 0,
}, },
}, },
{
name: "storage capacity score",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "50Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
10,
20,
100,
},
},
{
name: "storage capacity score with static binds",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").withPVCVolume("pvc-static", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
makePVC("pvc-static", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
makePV("pv-static-a", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
makePV("pv-static-b", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
makePV("pv-static-c", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-c"}}).PersistentVolume,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "50Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
makePVC("pvc-static", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
waitSC.Name: {
makePV("pv-static-a", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
makePV("pv-static-b", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
makePV("pv-static-c", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-c"}}).PersistentVolume,
},
waitSCWithStorageCapacity.Name: {},
},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
50,
50,
50,
},
},
{
name: "dynamic provisioning with multiple PVCs of the same StorageClass",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic-0", "").withPVCVolume("pvc-dynamic-1", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic-0", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
makePVC("pvc-dynamic-1", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic-0", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
makePVC("pvc-dynamic-1", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
},
wantScores: []int64{
100,
},
},
{
name: "prefer node with least allocatable",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "20Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
10,
50,
100,
},
},
{
name: "prefer node with maximum allocatable",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "20Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
args: &config.VolumeBindingArgs{
BindTimeoutSeconds: 300,
Shape: []config.UtilizationShapePoint{
{
Utilization: 0,
Score: int32(config.MaxCustomPriorityScore),
},
{
Utilization: 100,
Score: 0,
},
},
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
90,
50,
0,
},
},
} }
for _, item := range table { for _, item := range table {
@ -740,7 +1001,7 @@ func TestVolumeBinding(t *testing.T) {
args = &config.VolumeBindingArgs{ args = &config.VolumeBindingArgs{
BindTimeoutSeconds: 300, BindTimeoutSeconds: 300,
} }
if item.fts.EnableVolumeCapacityPriority { if item.fts.EnableStorageCapacityScoring {
args.Shape = defaultShapePoint args.Shape = defaultShapePoint
} }
} }
@ -751,17 +1012,50 @@ func TestVolumeBinding(t *testing.T) {
} }
t.Log("Feed testing data and wait for them to be synced") t.Log("Feed testing data and wait for them to be synced")
client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{}) _, err = client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{})
client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{}) if err != nil {
client.StorageV1().StorageClasses().Create(ctx, waitHDDSC, metav1.CreateOptions{}) t.Fatal(err)
}
_, err = client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
_, err = client.StorageV1().StorageClasses().Create(ctx, waitHDDSC, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
_, err = client.StorageV1().StorageClasses().Create(ctx, waitSCWithStorageCapacity, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
_, err = client.StorageV1().CSIDrivers().Create(ctx, driverWithStorageCapacity, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
for _, node := range item.nodes { for _, node := range item.nodes {
client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{}) _, err = client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
} }
for _, pvc := range item.pvcs { for _, pvc := range item.pvcs {
client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{}) _, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
} }
for _, pv := range item.pvs { for _, pv := range item.pvs {
client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{}) _, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
for _, capacity := range item.capacities {
_, err = client.StorageV1().CSIStorageCapacities(capacity.Namespace).Create(ctx, capacity, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
} }
t.Log("Start informer factory after initialization") t.Log("Start informer factory after initialization")

View File

@ -159,17 +159,17 @@ type VolumeBindingArgs struct {
BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"` BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"`
// Shape specifies the points defining the score function shape, which is // Shape specifies the points defining the score function shape, which is
// used to score nodes based on the utilization of statically provisioned // used to score nodes based on the utilization of provisioned PVs.
// PVs. The utilization is calculated by dividing the total requested // The utilization is calculated by dividing the total requested
// storage of the pod by the total capacity of feasible PVs on each node. // storage of the pod by the total capacity of feasible PVs on each node.
// Each point contains utilization (ranges from 0 to 100) and its // Each point contains utilization (ranges from 0 to 100) and its
// associated score (ranges from 0 to 10). You can turn the priority by // associated score (ranges from 0 to 10). You can turn the priority by
// specifying different scores for different utilization numbers. // specifying different scores for different utilization numbers.
// The default shape points are: // The default shape points are:
// 1) 0 for 0 utilization // 1) 10 for 0 utilization
// 2) 10 for 100 utilization // 2) 0 for 100 utilization
// All points must be sorted in increasing order by utilization. // All points must be sorted in increasing order by utilization.
// +featureGate=VolumeCapacityPriority // +featureGate=StorageCapacityScoring
// +optional // +optional
// +listType=atomic // +listType=atomic
Shape []UtilizationShapePoint `json:"shape,omitempty"` Shape []UtilizationShapePoint `json:"shape,omitempty"`

View File

@ -1367,6 +1367,12 @@
lockToDefault: true lockToDefault: true
preRelease: GA preRelease: GA
version: "1.31" version: "1.31"
- name: StorageCapacityScoring
versionedSpecs:
- default: false
lockToDefault: false
preRelease: Alpha
version: "1.33"
- name: StorageNamespaceIndex - name: StorageNamespaceIndex
versionedSpecs: versionedSpecs:
- default: true - default: true
@ -1577,12 +1583,6 @@
lockToDefault: false lockToDefault: false
preRelease: Beta preRelease: Beta
version: "1.31" version: "1.31"
- name: VolumeCapacityPriority
versionedSpecs:
- default: false
lockToDefault: false
preRelease: Alpha
version: "1.21"
- name: WatchCacheInitializationPostStartHook - name: WatchCacheInitializationPostStartHook
versionedSpecs: versionedSpecs:
- default: false - default: false

View File

@ -16,7 +16,7 @@ limitations under the License.
package volumescheduling package volumescheduling
// This file tests the VolumeCapacityPriority feature. // This file tests the StorageCapacityScoring feature.
import ( import (
"context" "context"
@ -46,7 +46,7 @@ func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {
return node return node
} }
func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig { func setupClusterForStorageCapacityScoring(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod) testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
testutil.SyncSchedulerInformerFactory(testCtx) testutil.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx) go testCtx.Scheduler.Run(testCtx.Ctx)
@ -75,10 +75,10 @@ func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPe
} }
} }
func TestVolumeCapacityPriority(t *testing.T) { func TestStorageCapacityScoring(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeCapacityPriority, true) featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageCapacityScoring, true)
config := setupClusterForVolumeCapacityPriority(t, "volume-capacity-priority", 0, 0) config := setupClusterForStorageCapacityScoring(t, "storage-capacity-scoring", 0, 0)
defer config.teardown() defer config.teardown()
tests := []struct { tests := []struct {
@ -90,7 +90,7 @@ func TestVolumeCapacityPriority(t *testing.T) {
wantNodeName string wantNodeName string
}{ }{
{ {
name: "local volumes with close capacity are preferred", name: "local volumes with max capacity are preferred",
pod: makePod("pod", config.ns, []string{"data"}), pod: makePod("pod", config.ns, []string{"data"}),
nodes: []*v1.Node{ nodes: []*v1.Node{
makeNode(0), makeNode(0),
@ -108,10 +108,10 @@ func TestVolumeCapacityPriority(t *testing.T) {
pvcs: []*v1.PersistentVolumeClaim{ pvcs: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("data", config.ns, &waitSSDSC.Name, ""), resource.MustParse("20Gi")), setPVCRequestStorage(makePVC("data", config.ns, &waitSSDSC.Name, ""), resource.MustParse("20Gi")),
}, },
wantNodeName: "node-2", wantNodeName: "node-0",
}, },
{ {
name: "local volumes with close capacity are preferred (multiple pvcs)", name: "local volumes with max capacity are preferred (multiple pvcs)",
pod: makePod("pod", config.ns, []string{"data-0", "data-1"}), pod: makePod("pod", config.ns, []string{"data-0", "data-1"}),
nodes: []*v1.Node{ nodes: []*v1.Node{
makeNode(0), makeNode(0),
@ -130,10 +130,10 @@ func TestVolumeCapacityPriority(t *testing.T) {
setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
setPVCRequestStorage(makePVC("data-1", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), setPVCRequestStorage(makePVC("data-1", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
}, },
wantNodeName: "node-1", wantNodeName: "node-0",
}, },
{ {
name: "local volumes with close capacity are preferred (multiple pvcs, multiple classes)", name: "local volumes with max capacity are preferred (multiple pvcs, multiple classes)",
pod: makePod("pod", config.ns, []string{"data-0", "data-1"}), pod: makePod("pod", config.ns, []string{"data-0", "data-1"}),
nodes: []*v1.Node{ nodes: []*v1.Node{
makeNode(0), makeNode(0),
@ -152,10 +152,10 @@ func TestVolumeCapacityPriority(t *testing.T) {
setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")), setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")),
}, },
wantNodeName: "node-1", wantNodeName: "node-0",
}, },
{ {
name: "zonal volumes with close capacity are preferred (multiple pvcs, multiple classes)", name: "zonal volumes with max capacity are preferred (multiple pvcs, multiple classes)",
pod: makePod("pod", config.ns, []string{"data-0", "data-1"}), pod: makePod("pod", config.ns, []string{"data-0", "data-1"}),
nodes: []*v1.Node{ nodes: []*v1.Node{
mergeNodeLabels(makeNode(0), map[string]string{ mergeNodeLabels(makeNode(0), map[string]string{
@ -201,7 +201,7 @@ func TestVolumeCapacityPriority(t *testing.T) {
setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")), setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")),
}, },
wantNodeName: "node-1", wantNodeName: "node-0",
}, },
} }