Merge pull request #128184 from cupnes/add-storage-capacity-scoring
KEP-4049: Add storage capacity scoring to VolumeBinding plugin
Commit 83c5f99f97
@@ -39,10 +39,8 @@ import (
basecompatibility "k8s.io/component-base/compatibility"
componentbaseconfig "k8s.io/component-base/config"
"k8s.io/component-base/featuregate"
featuregatetesting "k8s.io/component-base/featuregate/testing"
configv1 "k8s.io/kube-scheduler/config/v1"
"k8s.io/kubernetes/cmd/kube-scheduler/app/options"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/testing/defaults"
"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -222,19 +220,6 @@ leaderElection:
wantErr bool
wantFeaturesGates map[string]bool
}{
{
name: "default config with an alpha feature enabled",
flags: []string{
"--kubeconfig", configKubeconfig,
"--feature-gates=VolumeCapacityPriority=true",
},
wantPlugins: map[string]*config.Plugins{
"default-scheduler": defaults.ExpandedPluginsV1,
},
restoreFeatures: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false,
},
},
{
name: "component configuration v1 with only scheduler name configured",
flags: []string{
@@ -435,9 +420,6 @@ leaderElection:

for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
for k, v := range tc.restoreFeatures {
featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, k, v)
}
componentGlobalsRegistry := basecompatibility.NewComponentGlobalsRegistry()
verKube := basecompatibility.NewEffectiveVersionFromString("1.32", "1.31", "1.31")
fg := feature.DefaultFeatureGate.DeepCopy()
@@ -692,6 +692,13 @@ const (
// Enables trafficDistribution field on Services.
ServiceTrafficDistribution featuregate.Feature = "ServiceTrafficDistribution"

// owner: @cupnes
// kep: https://kep.k8s.io/4049
//
// Enables scoring nodes by available storage capacity with
// StorageCapacityScoring feature gate.
StorageCapacityScoring featuregate.Feature = "StorageCapacityScoring"

// owner: @gjkim42 @SergeyKanzhelev @matthyx @tzneal
// kep: http://kep.k8s.io/753
//
@@ -798,9 +805,6 @@ const (
// Enables user specified volume attributes for persistent volumes, like iops and throughput.
VolumeAttributesClass featuregate.Feature = "VolumeAttributesClass"

// owner: @cofyc
VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority"

// owner: @ksubrmnn
//
// Allows kube-proxy to create DSR loadbalancers for Windows
@@ -766,6 +766,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
{Version: version.MustParse("1.31"), Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.31, remove in 1.33
},

StorageCapacityScoring: {
{Version: version.MustParse("1.33"), Default: false, PreRelease: featuregate.Alpha},
},

StorageVersionMigrator: {
{Version: version.MustParse("1.30"), Default: false, PreRelease: featuregate.Alpha},
},
@@ -836,10 +840,6 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Beta},
},

VolumeCapacityPriority: {
{Version: version.MustParse("1.21"), Default: false, PreRelease: featuregate.Alpha},
},

WinDSR: {
{Version: version.MustParse("1.14"), Default: false, PreRelease: featuregate.Alpha},
},
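StorageCapacityScoring is introduced as alpha and default-off in 1.33, so callers have to opt in explicitly. A minimal sketch of the test-side opt-in, using the same SetFeatureGateDuringTest helper that the scheduler server test above already imports (the package name and the utilfeature alias are ours, not part of this commit):

package example_test

import (
    "testing"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/features"
)

// TestWithStorageCapacityScoring enables the alpha gate only for the duration
// of this test; the helper restores the previous value automatically.
func TestWithStorageCapacityScoring(t *testing.T) {
    featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageCapacityScoring, true)
    // ... exercise scheduler behaviour that depends on the gate ...
}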
pkg/generated/openapi/zz_generated.openapi.go (generated, 2 lines changed)
@@ -64065,7 +64065,7 @@ func schema_k8sio_kube_scheduler_config_v1_VolumeBindingArgs(ref common.Referenc
},
},
SchemaProps: spec.SchemaProps{
Description: "Shape specifies the points defining the score function shape, which is used to score nodes based on the utilization of statically provisioned PVs. The utilization is calculated by dividing the total requested storage of the pod by the total capacity of feasible PVs on each node. Each point contains utilization (ranges from 0 to 100) and its associated score (ranges from 0 to 10). You can turn the priority by specifying different scores for different utilization numbers. The default shape points are: 1) 0 for 0 utilization 2) 10 for 100 utilization All points must be sorted in increasing order by utilization.",
Description: "Shape specifies the points defining the score function shape, which is used to score nodes based on the utilization of provisioned PVs. The utilization is calculated by dividing the total requested storage of the pod by the total capacity of feasible PVs on each node. Each point contains utilization (ranges from 0 to 100) and its associated score (ranges from 0 to 10). You can turn the priority by specifying different scores for different utilization numbers. The default shape points are: 1) 10 for 0 utilization 2) 0 for 100 utilization All points must be sorted in increasing order by utilization.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
@@ -163,7 +163,7 @@ type VolumeBindingArgs struct {
// 1) 0 for 0 utilization
// 2) 10 for 100 utilization
// All points must be sorted in increasing order by utilization.
// +featureGate=VolumeCapacityPriority
// +featureGate=StorageCapacityScoring
// +optional
Shape []UtilizationShapePoint
}
@@ -192,15 +192,15 @@ func SetDefaults_VolumeBindingArgs(obj *configv1.VolumeBindingArgs) {
if obj.BindTimeoutSeconds == nil {
obj.BindTimeoutSeconds = ptr.To[int64](600)
}
if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) {
if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring) {
obj.Shape = []configv1.UtilizationShapePoint{
{
Utilization: 0,
Score: 0,
Score: int32(config.MaxCustomPriorityScore),
},
{
Utilization: 100,
Score: int32(config.MaxCustomPriorityScore),
Score: 0,
},
}
}
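The hunk above flips the default shape: with StorageCapacityScoring enabled, a node at 0% utilization now gets the maximum point score (config.MaxCustomPriorityScore, i.e. 10) and a node at 100% utilization gets 0, so by default nodes with more free capacity are preferred. A standalone sketch of how such a piecewise-linear shape evaluates — this is illustrative only and not the plugin's actual scorer, which is built from helper.FunctionShapePoint via buildScorerFunction later in this diff:

package main

import "fmt"

type point struct{ utilization, score int64 }

// evalShape linearly interpolates the score between the surrounding shape points.
func evalShape(shape []point, utilization int64) int64 {
    if utilization <= shape[0].utilization {
        return shape[0].score
    }
    for i := 1; i < len(shape); i++ {
        p0, p1 := shape[i-1], shape[i]
        if utilization <= p1.utilization {
            // Linear interpolation between p0 and p1.
            return p0.score + (p1.score-p0.score)*(utilization-p0.utilization)/(p1.utilization-p0.utilization)
        }
    }
    return shape[len(shape)-1].score
}

func main() {
    // New default with StorageCapacityScoring: full score at 0% utilization,
    // zero score at 100% utilization.
    defaultShape := []point{{0, 10}, {100, 0}}
    for _, u := range []int64{0, 10, 50, 100} {
        fmt.Printf("utilization=%d%% -> score=%d\n", u, evalShape(defaultShape, u))
    }
}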
@@ -810,9 +810,9 @@ func TestPluginArgsDefaults(t *testing.T) {
},
},
{
name: "VolumeBindingArgs empty, VolumeCapacityPriority disabled",
name: "VolumeBindingArgs empty, StorageCapacityScoring disabled",
features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false,
features.StorageCapacityScoring: false,
},
in: &configv1.VolumeBindingArgs{},
want: &configv1.VolumeBindingArgs{
@@ -820,16 +820,16 @@ func TestPluginArgsDefaults(t *testing.T) {
},
},
{
name: "VolumeBindingArgs empty, VolumeCapacityPriority enabled",
name: "VolumeBindingArgs empty, StorageCapacityScoring enabled",
features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true,
features.StorageCapacityScoring: true,
},
in: &configv1.VolumeBindingArgs{},
want: &configv1.VolumeBindingArgs{
BindTimeoutSeconds: ptr.To[int64](600),
Shape: []configv1.UtilizationShapePoint{
{Utilization: 0, Score: 0},
{Utilization: 100, Score: 10},
{Utilization: 0, Score: 10},
{Utilization: 100, Score: 0},
},
},
},
@@ -261,13 +261,13 @@ func ValidateNodeAffinityArgs(path *field.Path, args *config.NodeAffinityArgs) e

// VolumeBindingArgsValidationOptions contains the different settings for validation.
type VolumeBindingArgsValidationOptions struct {
AllowVolumeCapacityPriority bool
AllowStorageCapacityScoring bool
}

// ValidateVolumeBindingArgs validates that VolumeBindingArgs are set correctly.
func ValidateVolumeBindingArgs(path *field.Path, args *config.VolumeBindingArgs) error {
return ValidateVolumeBindingArgsWithOptions(path, args, VolumeBindingArgsValidationOptions{
AllowVolumeCapacityPriority: utilfeature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
AllowStorageCapacityScoring: utilfeature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring),
})
}

@@ -279,13 +279,13 @@ func ValidateVolumeBindingArgsWithOptions(path *field.Path, args *config.VolumeB
allErrs = append(allErrs, field.Invalid(path.Child("bindTimeoutSeconds"), args.BindTimeoutSeconds, "invalid BindTimeoutSeconds, should not be a negative value"))
}

if opts.AllowVolumeCapacityPriority {
if opts.AllowStorageCapacityScoring {
allErrs = append(allErrs, validateFunctionShape(args.Shape, path.Child("shape"))...)
} else if args.Shape != nil {
// When the feature is off, return an error if the config is not nil.
// This prevents unexpected configuration from taking effect when the
// feature turns on in the future.
allErrs = append(allErrs, field.Invalid(path.Child("shape"), args.Shape, "unexpected field `shape`, remove it or turn on the feature gate VolumeCapacityPriority"))
allErrs = append(allErrs, field.Invalid(path.Child("shape"), args.Shape, "unexpected field `shape`, remove it or turn on the feature gate StorageCapacityScoring"))
}
return allErrs.ToAggregate()
}
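A hedged, test-style sketch of the renamed validation option: with AllowStorageCapacityScoring false, a non-nil Shape is rejected rather than silently ignored, matching the error string in the hunk above. The validation package import path is assumed from the repository layout and is not shown in this diff:

package validation_test

import (
    "testing"

    "k8s.io/kubernetes/pkg/scheduler/apis/config"
    "k8s.io/kubernetes/pkg/scheduler/apis/config/validation" // path assumed
)

func TestShapeRejectedWhenGateOff(t *testing.T) {
    args := config.VolumeBindingArgs{
        BindTimeoutSeconds: 10,
        Shape: []config.UtilizationShapePoint{
            {Utilization: 0, Score: 0},
            {Utilization: 100, Score: 10},
        },
    }
    err := validation.ValidateVolumeBindingArgsWithOptions(nil, &args, validation.VolumeBindingArgsValidationOptions{
        AllowStorageCapacityScoring: false,
    })
    if err == nil {
        t.Fatal("expected an error for a non-nil shape while StorageCapacityScoring is off")
    }
}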
@@ -559,9 +559,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}),
},
{
name: "[VolumeCapacityPriority=off] shape should be nil when the feature is off",
name: "[StorageCapacityScoring=off] shape should be nil when the feature is off",
features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false,
features.StorageCapacityScoring: false,
},
args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10,
@@ -569,9 +569,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
},
},
{
name: "[VolumeCapacityPriority=off] error if the shape is not nil when the feature is off",
name: "[StorageCapacityScoring=off] error if the shape is not nil when the feature is off",
features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: false,
features.StorageCapacityScoring: false,
},
args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10,
@@ -586,9 +586,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}),
},
{
name: "[VolumeCapacityPriority=on] shape should not be empty",
name: "[StorageCapacityScoring=on] shape should not be empty",
features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true,
features.StorageCapacityScoring: true,
},
args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10,
@@ -600,9 +600,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}),
},
{
name: "[VolumeCapacityPriority=on] shape points must be sorted in increasing order",
name: "[StorageCapacityScoring=on] shape points must be sorted in increasing order",
features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true,
features.StorageCapacityScoring: true,
},
args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10,
@@ -618,9 +618,9 @@ func TestValidateVolumeBindingArgs(t *testing.T) {
}}),
},
{
name: "[VolumeCapacityPriority=on] shape point: invalid utilization and score",
name: "[StorageCapacityScoring=on] shape point: invalid utilization and score",
features: map[featuregate.Feature]bool{
features.VolumeCapacityPriority: true,
features.StorageCapacityScoring: true,
},
args: config.VolumeBindingArgs{
BindTimeoutSeconds: 10,
@@ -23,7 +23,6 @@ type Features struct {
EnableDRAPrioritizedList bool
EnableDRAAdminAccess bool
EnableDynamicResourceAllocation bool
EnableVolumeCapacityPriority bool
EnableVolumeAttributesClass bool
EnableCSIMigrationPortworx bool
EnableNodeInclusionPolicyInPodTopologySpread bool
@@ -33,4 +32,5 @@ type Features struct {
EnableSchedulingQueueHint bool
EnableAsyncPreemption bool
EnablePodLevelResources bool
EnableStorageCapacityScoring bool
}
@@ -49,7 +49,6 @@ func NewInTreeRegistry() runtime.Registry {
EnableDRAPrioritizedList: feature.DefaultFeatureGate.Enabled(features.DRAPrioritizedList),
EnableDRAAdminAccess: feature.DefaultFeatureGate.Enabled(features.DRAAdminAccess),
EnableDynamicResourceAllocation: feature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation),
EnableVolumeCapacityPriority: feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
EnableVolumeAttributesClass: feature.DefaultFeatureGate.Enabled(features.VolumeAttributesClass),
EnableCSIMigrationPortworx: feature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx),
EnableNodeInclusionPolicyInPodTopologySpread: feature.DefaultFeatureGate.Enabled(features.NodeInclusionPolicyInPodTopologySpread),
@@ -59,6 +58,7 @@ func NewInTreeRegistry() runtime.Registry {
EnableSchedulingQueueHint: feature.DefaultFeatureGate.Enabled(features.SchedulerQueueingHints),
EnableAsyncPreemption: feature.DefaultFeatureGate.Enabled(features.SchedulerAsyncPreemption),
EnablePodLevelResources: feature.DefaultFeatureGate.Enabled(features.PodLevelResources),
EnableStorageCapacityScoring: feature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring),
}

registry := runtime.Registry{
@@ -102,13 +102,19 @@ func (b *BindingInfo) StorageResource() *StorageResource {
}
}

// DynamicProvision represents a dynamically provisioned volume.
type DynamicProvision struct {
PVC *v1.PersistentVolumeClaim
NodeCapacity *storagev1.CSIStorageCapacity
}

// PodVolumes holds pod's volumes information used in volume scheduling.
type PodVolumes struct {
// StaticBindings are binding decisions for PVCs which can be bound to
// pre-provisioned static PVs.
StaticBindings []*BindingInfo
// DynamicProvisions are PVCs that require dynamic provisioning
DynamicProvisions []*v1.PersistentVolumeClaim
DynamicProvisions []*DynamicProvision
}

// InTreeToCSITranslator contains methods required to check migratable status
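For illustration, a self-contained sketch of the new pairing: each DynamicProvision keeps the claim together with the CSIStorageCapacity object that satisfied it, which is what the Score extension later in this diff reads as NodeCapacity. The DynamicProvision type is redeclared locally here; only the k8s.io API types are real imports, and none of this is part of the commit itself:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// DynamicProvision is a local stand-in mirroring the binder type above.
type DynamicProvision struct {
    PVC          *v1.PersistentVolumeClaim
    NodeCapacity *storagev1.CSIStorageCapacity
}

func main() {
    capacity := resource.MustParse("100Gi")
    p := DynamicProvision{
        PVC:          &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "pvc-dynamic"}},
        NodeCapacity: &storagev1.CSIStorageCapacity{Capacity: &capacity},
    }
    if p.NodeCapacity != nil {
        fmt.Printf("claim %s/%s can be provisioned against %s of capacity\n",
            p.PVC.Namespace, p.PVC.Name, p.NodeCapacity.Capacity.String())
    }
}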
@@ -310,7 +316,7 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume

var (
staticBindings []*BindingInfo
dynamicProvisions []*v1.PersistentVolumeClaim
dynamicProvisions []*DynamicProvision
)
defer func() {
// Although we do not distinguish nil from empty in this function, for
@@ -377,6 +383,16 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
return
}

// ConvertDynamicProvisionsToPVCs converts a slice of *DynamicProvision to a
// slice of PersistentVolumeClaim
func convertDynamicProvisionsToPVCs(dynamicProvisions []*DynamicProvision) []*v1.PersistentVolumeClaim {
pvcs := make([]*v1.PersistentVolumeClaim, 0, len(dynamicProvisions))
for _, dynamicProvision := range dynamicProvisions {
pvcs = append(pvcs, dynamicProvision.PVC)
}
return pvcs
}

// AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
// volume information for the chosen node, and:
// 1. Update the pvCache with the new prebound PV.
@@ -423,20 +439,21 @@ func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod,
}

// Assume PVCs
newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
for _, claim := range podVolumes.DynamicProvisions {
newProvisionedPVCs := []*DynamicProvision{}
for _, dynamicProvision := range podVolumes.DynamicProvisions {
// The claims from method args can be pointing to watcher cache. We must not
// modify these, therefore create a copy.
claimClone := claim.DeepCopy()
claimClone := dynamicProvision.PVC.DeepCopy()
metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, volume.AnnSelectedNode, nodeName)
err = b.pvcCache.Assume(claimClone)
if err != nil {
pvcs := convertDynamicProvisionsToPVCs(newProvisionedPVCs)
b.revertAssumedPVs(newBindings)
b.revertAssumedPVCs(newProvisionedPVCs)
b.revertAssumedPVCs(pvcs)
return
}

newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
newProvisionedPVCs = append(newProvisionedPVCs, &DynamicProvision{PVC: claimClone})
}

podVolumes.StaticBindings = newBindings
@@ -446,8 +463,9 @@ func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod,

// RevertAssumedPodVolumes will revert assumed PV and PVC cache.
func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) {
pvcs := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)
b.revertAssumedPVs(podVolumes.StaticBindings)
b.revertAssumedPVCs(podVolumes.DynamicProvisions)
b.revertAssumedPVCs(pvcs)
}

// BindPodVolumes gets the cached bindings and PVCs to provision in pod's volumes information,
@@ -464,7 +482,7 @@ func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, p
}()

bindings := podVolumes.StaticBindings
claimsToProvision := podVolumes.DynamicProvisions
claimsToProvision := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)

// Start API operations
err = b.bindAPIUpdate(ctx, assumedPod, bindings, claimsToProvision)
@@ -886,8 +904,8 @@ func (b *volumeBinder) findMatchingVolumes(logger klog.Logger, pod *v1.Pod, clai
// checkVolumeProvisions checks given unbound claims (the claims have gone through func
// findMatchingVolumes, and do not have matching volumes for binding), and return true
// if all of the claims are eligible for dynamic provision.
func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) {
dynamicProvisions = []*v1.PersistentVolumeClaim{}
func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*DynamicProvision, err error) {
dynamicProvisions = []*DynamicProvision{}

// We return early with provisionedClaims == nil if a check
// fails or we encounter an error.
@@ -915,7 +933,7 @@ func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, cl
}

// Check storage capacity.
sufficient, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node)
sufficient, capacity, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node)
if err != nil {
return false, false, nil, err
}
@@ -924,8 +942,10 @@ func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, cl
return true, false, nil, nil
}

dynamicProvisions = append(dynamicProvisions, claim)

dynamicProvisions = append(dynamicProvisions, &DynamicProvision{
PVC: claim,
NodeCapacity: capacity,
})
}
logger.V(4).Info("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))
@@ -945,12 +965,12 @@ func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
}

// hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size
// that is available from the node.
func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, error) {
// that is available from the node. This function returns the node capacity based on the PVC's storage class.
func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, *storagev1.CSIStorageCapacity, error) {
quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage]
if !ok {
// No capacity to check for.
return true, nil
return true, nil, nil
}

// Only enabled for CSI drivers which opt into it.
@@ -960,19 +980,19 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
// Either the provisioner is not a CSI driver or the driver does not
// opt into storage capacity scheduling. Either way, skip
// capacity checking.
return true, nil
return true, nil, nil
}
return false, err
return false, nil, err
}
if driver.Spec.StorageCapacity == nil || !*driver.Spec.StorageCapacity {
return true, nil
return true, nil, nil
}

// Look for a matching CSIStorageCapacity object(s).
// TODO (for beta): benchmark this and potentially introduce some kind of lookup structure (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-654356718).
capacities, err := b.csiStorageCapacityLister.List(labels.Everything())
if err != nil {
return false, err
return false, nil, err
}

sizeInBytes := quantity.Value()
@@ -981,7 +1001,7 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
capacitySufficient(capacity, sizeInBytes) &&
b.nodeHasAccess(logger, node, capacity) {
// Enough capacity found.
return true, nil
return true, capacity, nil
}
}

@@ -989,7 +1009,7 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
// they had to be rejected. Log that above? But that might be a lot of log output...
logger.V(4).Info("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
"node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
return false, nil
return false, nil, nil
}

func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {
@@ -122,6 +122,11 @@ var (

// node topology for CSI migration
zone1Labels = map[string]string{v1.LabelFailureDomainBetaZone: "us-east-1", v1.LabelFailureDomainBetaRegion: "us-east-1a"}

// csiCapacity objects
networkAttachedCapacity = makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", "")
node1Capacity = makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", "")
node2Capacity = makeCapacity("net", waitClassWithProvisioner, node2, "1Gi", "")
)

type testEnv struct {
@@ -396,14 +401,14 @@ func (env *testEnv) assumeVolumes(t *testing.T, node string, pod *v1.Pod, bindin
}
}

func (env *testEnv) validatePodCache(t *testing.T, node string, pod *v1.Pod, podVolumes *PodVolumes, expectedBindings []*BindingInfo, expectedProvisionings []*v1.PersistentVolumeClaim) {
func (env *testEnv) validatePodCache(t *testing.T, node string, pod *v1.Pod, podVolumes *PodVolumes, expectedBindings []*BindingInfo, expectedProvisionings []*DynamicProvision) {
var (
bindings []*BindingInfo
provisionedClaims []*v1.PersistentVolumeClaim
dynamicProvisions []*DynamicProvision
)
if podVolumes != nil {
bindings = podVolumes.StaticBindings
provisionedClaims = podVolumes.DynamicProvisions
dynamicProvisions = podVolumes.DynamicProvisions
}
if aLen, eLen := len(bindings), len(expectedBindings); aLen != eLen {
t.Errorf("expected %v bindings, got %v", eLen, aLen)
@@ -427,17 +432,17 @@ func (env *testEnv) validatePodCache(t *testing.T, node string, pod *v1.Pod, pod
}
}

if aLen, eLen := len(provisionedClaims), len(expectedProvisionings); aLen != eLen {
if aLen, eLen := len(dynamicProvisions), len(expectedProvisionings); aLen != eLen {
t.Errorf("expected %v provisioned claims, got %v", eLen, aLen)
} else if expectedProvisionings == nil && provisionedClaims != nil {
} else if expectedProvisionings == nil && dynamicProvisions != nil {
// nil and empty are different
t.Error("expected nil provisionings, got empty")
} else if expectedProvisionings != nil && provisionedClaims == nil {
} else if expectedProvisionings != nil && dynamicProvisions == nil {
// nil and empty are different
t.Error("expected empty provisionings, got nil")
} else {
for i := 0; i < aLen; i++ {
if diff := cmp.Diff(expectedProvisionings[i], provisionedClaims[i]); diff != "" {
if diff := cmp.Diff(expectedProvisionings[i], dynamicProvisions[i]); diff != "" {
t.Errorf("provisioned claims doesn't match (-want, +got):\n%s", diff)
}
}
@@ -1041,7 +1046,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {

// Expected podBindingCache fields
expectedBindings []*BindingInfo
expectedProvisions []*v1.PersistentVolumeClaim
expectedProvisions []*DynamicProvision

// Expected return values
reasons ConflictReasons
@@ -1051,26 +1056,26 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
scenarios := map[string]scenarioType{
"one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true,
},
"two-unbound-pvcs,one-matched,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true,
},
"one-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true,
},
"one-binding,one-selected-node": {
podPVCs: []*v1.PersistentVolumeClaim{boundPVC, selectedNodePVC},
pvs: []*v1.PersistentVolume{pvBound},
expectedProvisions: []*v1.PersistentVolumeClaim{selectedNodePVC},
expectedProvisions: []*DynamicProvision{{PVC: selectedNodePVC}},
needsCapacity: true,
},
"immediate-unbound-pvc": {
@@ -1080,7 +1085,7 @@ func TestFindPodVolumesWithProvisioning(t *testing.T) {
"one-immediate-bound,one-provisioned": {
podPVCs: []*v1.PersistentVolumeClaim{immediateBoundPVC, provisionedPVC},
pvs: []*v1.PersistentVolume{pvBoundImmediate},
expectedProvisions: []*v1.PersistentVolumeClaim{provisionedPVC},
expectedProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
needsCapacity: true,
},
"invalid-provisioner": {
@@ -1266,17 +1271,17 @@ func TestFindPodVolumesWithCSIMigration(t *testing.T) {
func TestAssumePodVolumes(t *testing.T) {
type scenarioType struct {
// Inputs
podPVCs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume
bindings []*BindingInfo
provisionedPVCs []*v1.PersistentVolumeClaim
podPVCs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume
bindings []*BindingInfo
dynamicProvisions []*DynamicProvision

// Expected return values
shouldFail bool
expectedAllBound bool

expectedBindings []*BindingInfo
expectedProvisionings []*v1.PersistentVolumeClaim
expectedProvisionings []*DynamicProvision
}
scenarios := map[string]scenarioType{
"all-bound": {
@@ -1289,21 +1294,21 @@ func TestAssumePodVolumes(t *testing.T) {
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
pvs: []*v1.PersistentVolume{pvNode1a},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
expectedProvisionings: []*DynamicProvision{},
},
"two-bindings": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, unboundPVC2},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a), makeBinding(unboundPVC2, pvNode1b)},
pvs: []*v1.PersistentVolume{pvNode1a, pvNode1b},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound), makeBinding(unboundPVC2, pvNode1bBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
expectedProvisionings: []*DynamicProvision{},
},
"pv-already-bound": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
pvs: []*v1.PersistentVolume{pvNode1aBound},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{},
expectedProvisionings: []*DynamicProvision{},
},
"tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC},
@@ -1315,16 +1320,16 @@ func TestAssumePodVolumes(t *testing.T) {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC},
dynamicProvisions: []*DynamicProvision{{PVC: provisionedPVC}},
expectedBindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)},
expectedProvisionings: []*v1.PersistentVolumeClaim{selectedNodePVC},
expectedProvisionings: []*DynamicProvision{{PVC: selectedNodePVC}},
},
"one-binding, one-provision-tmpupdate-failed": {
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
pvs: []*v1.PersistentVolume{pvNode1a},
provisionedPVCs: []*v1.PersistentVolumeClaim{provisionedPVC2},
shouldFail: true,
podPVCs: []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVCHigherVersion},
bindings: []*BindingInfo{makeBinding(unboundPVC, pvNode1a)},
pvs: []*v1.PersistentVolume{pvNode1a},
dynamicProvisions: []*DynamicProvision{{PVC: provisionedPVC2}},
shouldFail: true,
},
}
@@ -1342,7 +1347,7 @@ func TestAssumePodVolumes(t *testing.T) {
withPVCSVolume(scenario.podPVCs).Pod
podVolumes := &PodVolumes{
StaticBindings: scenario.bindings,
DynamicProvisions: scenario.provisionedPVCs,
DynamicProvisions: scenario.dynamicProvisions,
}
testEnv.initVolumes(scenario.pvs, scenario.pvs)

@@ -1363,12 +1368,14 @@ func TestAssumePodVolumes(t *testing.T) {
scenario.expectedBindings = scenario.bindings
}
if scenario.expectedProvisionings == nil {
scenario.expectedProvisionings = scenario.provisionedPVCs
scenario.expectedProvisionings = scenario.dynamicProvisions
}
if scenario.shouldFail {
testEnv.validateCacheRestored(t, pod, scenario.bindings, scenario.provisionedPVCs)
pvcs := convertDynamicProvisionsToPVCs(scenario.dynamicProvisions)
testEnv.validateCacheRestored(t, pod, scenario.bindings, pvcs)
} else {
testEnv.validateAssume(t, pod, scenario.expectedBindings, scenario.expectedProvisionings)
pvcs := convertDynamicProvisionsToPVCs(scenario.expectedProvisionings)
testEnv.validateAssume(t, pod, scenario.expectedBindings, pvcs)
}
testEnv.validatePodCache(t, pod.Spec.NodeName, pod, podVolumes, scenario.expectedBindings, scenario.expectedProvisionings)
}
@@ -1386,7 +1393,7 @@ func TestRevertAssumedPodVolumes(t *testing.T) {
podPVCs := []*v1.PersistentVolumeClaim{unboundPVC, provisionedPVC}
bindings := []*BindingInfo{makeBinding(unboundPVC, pvNode1a)}
pvs := []*v1.PersistentVolume{pvNode1a}
provisionedPVCs := []*v1.PersistentVolumeClaim{provisionedPVC}
dynamicProvisions := []*DynamicProvision{{PVC: provisionedPVC}}
expectedBindings := []*BindingInfo{makeBinding(unboundPVC, pvNode1aBound)}
expectedProvisionings := []*v1.PersistentVolumeClaim{selectedNodePVC}

@@ -1399,7 +1406,7 @@ func TestRevertAssumedPodVolumes(t *testing.T) {
withPVCSVolume(podPVCs).Pod
podVolumes := &PodVolumes{
StaticBindings: bindings,
DynamicProvisions: provisionedPVCs,
DynamicProvisions: dynamicProvisions,
}
testEnv.initVolumes(pvs, pvs)

@@ -1409,8 +1416,9 @@ func TestRevertAssumedPodVolumes(t *testing.T) {
}
testEnv.validateAssume(t, pod, expectedBindings, expectedProvisionings)

claims := convertDynamicProvisionsToPVCs(dynamicProvisions)
testEnv.binder.RevertAssumedPodVolumes(podVolumes)
testEnv.validateCacheRestored(t, pod, bindings, provisionedPVCs)
testEnv.validateCacheRestored(t, pod, bindings, claims)
}

func TestBindAPIUpdate(t *testing.T) {
@@ -2075,9 +2083,13 @@ func TestBindPodVolumes(t *testing.T) {
}

// Execute
dynamicProvisions := []*DynamicProvision{}
for _, claim := range claimsToProvision {
dynamicProvisions = append(dynamicProvisions, &DynamicProvision{PVC: claim})
}
podVolumes := &PodVolumes{
StaticBindings: bindings,
DynamicProvisions: claimsToProvision,
DynamicProvisions: dynamicProvisions,
}
err := testEnv.binder.BindPodVolumes(ctx, pod, podVolumes)
@@ -2173,29 +2185,42 @@ func TestCapacity(t *testing.T) {
capacities []*storagev1.CSIStorageCapacity

// Expected return values
reasons ConflictReasons
shouldFail bool
expectedProvisions []*DynamicProvision
reasons ConflictReasons
shouldFail bool
}
scenarios := map[string]scenarioType{
"network-attached": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", ""),
networkAttachedCapacity,
},
expectedProvisions: []*DynamicProvision{{
PVC: provisionedPVC,
NodeCapacity: networkAttachedCapacity,
}},
},
"local-storage": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", ""),
node1Capacity,
},
expectedProvisions: []*DynamicProvision{{
PVC: provisionedPVC,
NodeCapacity: node1Capacity,
}},
},
"multiple": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("net", waitClassWithProvisioner, nil, "1Gi", ""),
makeCapacity("net", waitClassWithProvisioner, node2, "1Gi", ""),
makeCapacity("net", waitClassWithProvisioner, node1, "1Gi", ""),
networkAttachedCapacity,
node2Capacity,
node1Capacity,
},
expectedProvisions: []*DynamicProvision{{
PVC: provisionedPVC,
NodeCapacity: node1Capacity,
}},
},
"no-storage": {
pvcs: []*v1.PersistentVolumeClaim{provisionedPVC},
@@ -2297,11 +2322,16 @@ func TestCapacity(t *testing.T) {
t.Error("returned success but expected error")
}
checkReasons(t, reasons, expectedReasons)
provisions := scenario.pvcs
if len(reasons) > 0 {
provisions = nil
expectedProvisions := scenario.expectedProvisions
if !optIn {
for i := 0; i < len(expectedProvisions); i++ {
expectedProvisions[i].NodeCapacity = nil
}
}
testEnv.validatePodCache(t, pod.Spec.NodeName, pod, podVolumes, nil, provisions)
if len(scenario.reasons) > 0 {
expectedProvisions = podVolumes.DynamicProvisions
}
testEnv.validatePodCache(t, pod.Spec.NodeName, pod, podVolumes, nil, expectedProvisions)
}

yesNo := []bool{true, false}
@@ -29,6 +29,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -70,10 +71,11 @@ func (d *stateData) Clone() framework.StateData {
// In the Filter phase, pod binding cache is created for the pod and used in
// Reserve and PreBind phases.
type VolumeBinding struct {
Binder SchedulerVolumeBinder
PVCLister corelisters.PersistentVolumeClaimLister
scorer volumeCapacityScorer
fts feature.Features
Binder SchedulerVolumeBinder
PVCLister corelisters.PersistentVolumeClaimLister
classLister storagelisters.StorageClassLister
scorer volumeCapacityScorer
fts feature.Features
}

var _ framework.PreFilterPlugin = &VolumeBinding{}
@@ -451,7 +453,7 @@ func (pl *VolumeBinding) PreScore(ctx context.Context, cs *framework.CycleState,
if err != nil {
return framework.AsStatus(err)
}
if state.hasStaticBindings {
if state.hasStaticBindings || pl.fts.EnableStorageCapacityScoring {
return nil
}
return framework.NewStatus(framework.Skip)
@@ -471,20 +473,44 @@ func (pl *VolumeBinding) Score(ctx context.Context, cs *framework.CycleState, po
if !ok {
return 0, nil
}
// group by storage class

classResources := make(classResourceMap)
for _, staticBinding := range podVolumes.StaticBindings {
class := staticBinding.StorageClassName()
storageResource := staticBinding.StorageResource()
if _, ok := classResources[class]; !ok {
classResources[class] = &StorageResource{
Requested: 0,
Capacity: 0,
if len(podVolumes.StaticBindings) != 0 || !pl.fts.EnableStorageCapacityScoring {
// group static binding volumes by storage class
for _, staticBinding := range podVolumes.StaticBindings {
class := staticBinding.StorageClassName()
storageResource := staticBinding.StorageResource()
if _, ok := classResources[class]; !ok {
classResources[class] = &StorageResource{
Requested: 0,
Capacity: 0,
}
}
classResources[class].Requested += storageResource.Requested
classResources[class].Capacity += storageResource.Capacity
}
} else {
// group dynamic binding volumes by storage class
for _, provision := range podVolumes.DynamicProvisions {
if provision.NodeCapacity == nil {
continue
}
class := *provision.PVC.Spec.StorageClassName
if _, ok := classResources[class]; !ok {
classResources[class] = &StorageResource{
Requested: 0,
Capacity: 0,
}
}
// The following line cannot be +=. For example, if a Pod requests two 50GB volumes from
// a StorageClass with 100GB of capacity on a node, this part of the code will be executed twice.
// In that case, using += would incorrectly set classResources[class].Capacity to 200GB.
classResources[class].Capacity = provision.NodeCapacity.Capacity.Value()
requestedQty := provision.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
classResources[class].Requested += requestedQty.Value()
}
}
classResources[class].Requested += storageResource.Requested
classResources[class].Capacity += storageResource.Capacity
}

return pl.scorer(classResources), nil
}
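The capacity-assignment comment in the hunk above is easiest to see with numbers. For the test case later in this diff where a pod requests two 50Gi volumes from one StorageClass backed by a single 100Gi CSIStorageCapacity on the node, the loop runs twice but must end with Capacity = 100Gi and Requested = 100Gi (100% utilization), not Capacity = 200Gi. An illustrative standalone sketch of that accumulation — StorageResource is copied locally here rather than imported from the plugin:

package main

import "fmt"

// StorageResource mirrors the plugin's per-class accumulator (illustrative copy).
type StorageResource struct {
    Requested int64
    Capacity  int64
}

func main() {
    const gi = int64(1) << 30
    // Two 50Gi claims from one StorageClass, 100Gi CSIStorageCapacity on the node.
    requests := []int64{50 * gi, 50 * gi}
    nodeCapacity := 100 * gi

    res := &StorageResource{}
    for _, req := range requests {
        // Capacity is assigned, not accumulated: both claims share the same
        // CSIStorageCapacity object, so += would double-count it.
        res.Capacity = nodeCapacity
        res.Requested += req
    }
    utilization := 100 * res.Requested / res.Capacity
    fmt.Printf("requested=%dGi capacity=%dGi utilization=%d%%\n",
        res.Requested/gi, res.Capacity/gi, utilization) // 100Gi, 100Gi, 100%
}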
@@ -566,7 +592,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
return nil, fmt.Errorf("want args to be of type VolumeBindingArgs, got %T", plArgs)
}
if err := validation.ValidateVolumeBindingArgsWithOptions(nil, args, validation.VolumeBindingArgsValidationOptions{
AllowVolumeCapacityPriority: fts.EnableVolumeCapacityPriority,
AllowStorageCapacityScoring: fts.EnableStorageCapacityScoring,
}); err != nil {
return nil, err
}
@@ -584,7 +610,7 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe

// build score function
var scorer volumeCapacityScorer
if fts.EnableVolumeCapacityPriority {
if fts.EnableStorageCapacityScoring {
shape := make(helper.FunctionShape, 0, len(args.Shape))
for _, point := range args.Shape {
shape = append(shape, helper.FunctionShapePoint{
@@ -595,9 +621,10 @@ func New(ctx context.Context, plArgs runtime.Object, fh framework.Handle, fts fe
scorer = buildScorerFunction(shape)
}
return &VolumeBinding{
Binder: binder,
PVCLister: pvcInformer.Lister(),
scorer: scorer,
fts: fts,
Binder: binder,
PVCLister: pvcInformer.Lister(),
classLister: storageClassInformer.Lister(),
scorer: scorer,
fts: fts,
}, nil
}
@@ -59,6 +59,22 @@ var (
},
VolumeBindingMode: &waitForFirstConsumer,
}
waitSCWithStorageCapacity = &storagev1.StorageClass{
ObjectMeta: metav1.ObjectMeta{
Name: "wait-sc-with-storage-capacity",
},
Provisioner: "driver-with-storage-capacity",
VolumeBindingMode: &waitForFirstConsumer,
}

driverWithStorageCapacity = &storagev1.CSIDriver{
ObjectMeta: metav1.ObjectMeta{
Name: "driver-with-storage-capacity",
},
Spec: storagev1.CSIDriverSpec{
StorageCapacity: ptr.To(true),
},
}

defaultShapePoint = []config.UtilizationShapePoint{
{
@@ -79,6 +95,7 @@ func TestVolumeBinding(t *testing.T) {
nodes []*v1.Node
pvcs []*v1.PersistentVolumeClaim
pvs []*v1.PersistentVolume
capacities []*storagev1.CSIStorageCapacity
fts feature.Features
args *config.VolumeBindingArgs
wantPreFilterResult *framework.PreFilterResult
@@ -310,7 +327,7 @@ func TestVolumeBinding(t *testing.T) {
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
},
fts: feature.Features{
EnableVolumeCapacityPriority: true,
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
@@ -400,7 +417,7 @@ func TestVolumeBinding(t *testing.T) {
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
},
fts: feature.Features{
EnableVolumeCapacityPriority: true,
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
@@ -519,7 +536,7 @@ func TestVolumeBinding(t *testing.T) {
}).PersistentVolume,
},
fts: feature.Features{
EnableVolumeCapacityPriority: true,
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
@@ -637,7 +654,7 @@ func TestVolumeBinding(t *testing.T) {
}).PersistentVolume,
},
fts: feature.Features{
EnableVolumeCapacityPriority: true,
EnableStorageCapacityScoring: true,
},
args: &config.VolumeBindingArgs{
BindTimeoutSeconds: 300,
@@ -716,6 +733,250 @@ func TestVolumeBinding(t *testing.T) {
0,
},
},
{
name: "storage capacity score",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "50Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
10,
20,
100,
},
},
{
name: "storage capacity score with static binds",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").withPVCVolume("pvc-static", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
makePVC("pvc-static", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
pvs: []*v1.PersistentVolume{
makePV("pv-static-a", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
makePV("pv-static-b", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
makePV("pv-static-c", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-c"}}).PersistentVolume,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "50Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
makePVC("pvc-static", waitSC.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{
waitSC.Name: {
makePV("pv-static-a", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-a"}}).PersistentVolume,
makePV("pv-static-b", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-b"}}).PersistentVolume,
makePV("pv-static-c", waitSC.Name).
withPhase(v1.VolumeAvailable).
withCapacity(resource.MustParse("100Gi")).
withNodeAffinity(map[string][]string{v1.LabelHostname: {"node-c"}}).PersistentVolume,
},
waitSCWithStorageCapacity.Name: {},
},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
50,
50,
50,
},
},
{
name: "dynamic provisioning with multiple PVCs of the same StorageClass",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic-0", "").withPVCVolume("pvc-dynamic-1", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic-0", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
makePVC("pvc-dynamic-1", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic-0", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
makePVC("pvc-dynamic-1", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("50Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
},
wantScores: []int64{
100,
},
},
{
name: "prefer node with least allocatable",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "20Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
10,
50,
100,
},
},
{
name: "prefer node with maximum allocatable",
pod: makePod("pod-a").withPVCVolume("pvc-dynamic", "").Pod,
nodes: []*v1.Node{
makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node,
makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node,
makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node,
},
pvcs: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
capacities: []*storagev1.CSIStorageCapacity{
makeCapacity("node-a", waitSCWithStorageCapacity.Name, makeNode("node-a").withLabel(nodeLabelKey, "node-a").Node, "100Gi", ""),
makeCapacity("node-b", waitSCWithStorageCapacity.Name, makeNode("node-b").withLabel(nodeLabelKey, "node-b").Node, "20Gi", ""),
makeCapacity("node-c", waitSCWithStorageCapacity.Name, makeNode("node-c").withLabel(nodeLabelKey, "node-c").Node, "10Gi", ""),
},
fts: feature.Features{
EnableStorageCapacityScoring: true,
},
args: &config.VolumeBindingArgs{
BindTimeoutSeconds: 300,
Shape: []config.UtilizationShapePoint{
{
Utilization: 0,
Score: int32(config.MaxCustomPriorityScore),
},
{
Utilization: 100,
Score: 0,
},
},
},
wantPreFilterStatus: nil,
wantStateAfterPreFilter: &stateData{
podVolumeClaims: &PodVolumeClaims{
boundClaims: []*v1.PersistentVolumeClaim{},
unboundClaimsDelayBinding: []*v1.PersistentVolumeClaim{
makePVC("pvc-dynamic", waitSCWithStorageCapacity.Name).withRequestStorage(resource.MustParse("10Gi")).PersistentVolumeClaim,
},
unboundVolumesDelayBinding: map[string][]*v1.PersistentVolume{waitSCWithStorageCapacity.Name: {}},
},
podVolumesByNode: map[string]*PodVolumes{},
},
wantFilterStatus: []*framework.Status{
nil,
nil,
nil,
},
wantScores: []int64{
90,
50,
0,
},
},
}

for _, item := range table {
@@ -740,7 +1001,7 @@ func TestVolumeBinding(t *testing.T) {
args = &config.VolumeBindingArgs{
BindTimeoutSeconds: 300,
}
if item.fts.EnableVolumeCapacityPriority {
if item.fts.EnableStorageCapacityScoring {
args.Shape = defaultShapePoint
}
}
@ -751,17 +1012,50 @@ func TestVolumeBinding(t *testing.T) {
}

t.Log("Feed testing data and wait for them to be synced")
client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{})
client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{})
client.StorageV1().StorageClasses().Create(ctx, waitHDDSC, metav1.CreateOptions{})
_, err = client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
_, err = client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
_, err = client.StorageV1().StorageClasses().Create(ctx, waitHDDSC, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
_, err = client.StorageV1().StorageClasses().Create(ctx, waitSCWithStorageCapacity, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}

_, err = client.StorageV1().CSIDrivers().Create(ctx, driverWithStorageCapacity, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
for _, node := range item.nodes {
client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
_, err = client.CoreV1().Nodes().Create(ctx, node, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
for _, pvc := range item.pvcs {
client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
_, err = client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
for _, pv := range item.pvs {
client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
_, err = client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}
for _, capacity := range item.capacities {
_, err = client.StorageV1().CSIStorageCapacities(capacity.Namespace).Create(ctx, capacity, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
}

t.Log("Start informer factory after initialization")

@ -159,17 +159,17 @@ type VolumeBindingArgs struct {
BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"`

// Shape specifies the points defining the score function shape, which is
// used to score nodes based on the utilization of statically provisioned
// PVs. The utilization is calculated by dividing the total requested
// used to score nodes based on the utilization of provisioned PVs.
// The utilization is calculated by dividing the total requested
// storage of the pod by the total capacity of feasible PVs on each node.
// Each point contains utilization (ranges from 0 to 100) and its
// associated score (ranges from 0 to 10). You can tune the priority by
// specifying different scores for different utilization numbers.
// The default shape points are:
// 1) 0 for 0 utilization
// 2) 10 for 100 utilization
// 1) 10 for 0 utilization
// 2) 0 for 100 utilization
// All points must be sorted in increasing order by utilization.
// +featureGate=VolumeCapacityPriority
// +featureGate=StorageCapacityScoring
// +optional
// +listType=atomic
Shape []UtilizationShapePoint `json:"shape,omitempty"`

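As a quick illustration of the default-shape change documented above (old default: score 0 at 0% utilization and 10 at 100%; new default: 10 at 0% and 0 at 100%), the sketch below (not part of the diff) prints both curves side by side. With only two points the score is a straight line between them, so under the new default a node with more free capacity scores higher out of the box.

package main

import "fmt"

func main() {
	// Old and new default shapes reduce to simple linear functions of utilization.
	for _, u := range []float64{0, 25, 50, 75, 100} {
		oldDefault := u / 100 * 10         // 0 at 0% utilization, 10 at 100%
		newDefault := (100 - u) / 100 * 10 // 10 at 0% utilization, 0 at 100%
		fmt.Printf("utilization %3.0f%%: old default score %4.1f, new default score %4.1f\n", u, oldDefault, newDefault)
	}
}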
@ -1367,6 +1367,12 @@
lockToDefault: true
preRelease: GA
version: "1.31"
- name: StorageCapacityScoring
versionedSpecs:
- default: false
lockToDefault: false
preRelease: Alpha
version: "1.33"
- name: StorageNamespaceIndex
versionedSpecs:
- default: true
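For reference, a minimal sketch (assuming it is compiled inside the kubernetes module so these packages resolve) of how code can consult the new gate. Per the versioned spec above it is Alpha and off by default at 1.33, so this reports false unless the gate is explicitly enabled, for example with --feature-gates=StorageCapacityScoring=true.

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// DefaultFeatureGate reflects the registered versioned specs, so this is
	// false by default on a 1.33 binary unless the gate was enabled.
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.StorageCapacityScoring))
}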
@ -1577,12 +1583,6 @@
lockToDefault: false
preRelease: Beta
version: "1.31"
- name: VolumeCapacityPriority
versionedSpecs:
- default: false
lockToDefault: false
preRelease: Alpha
version: "1.21"
- name: WatchCacheInitializationPostStartHook
versionedSpecs:
- default: false
@ -16,7 +16,7 @@ limitations under the License.

package volumescheduling

// This file tests the VolumeCapacityPriority feature.
// This file tests the StorageCapacityScoring feature.

import (
"context"
@ -46,7 +46,7 @@ func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {
return node
}

func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
func setupClusterForStorageCapacityScoring(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
testutil.SyncSchedulerInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
@ -75,10 +75,10 @@ func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPe
}
}

func TestVolumeCapacityPriority(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeCapacityPriority, true)
func TestStorageCapacityScoring(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StorageCapacityScoring, true)

config := setupClusterForVolumeCapacityPriority(t, "volume-capacity-priority", 0, 0)
config := setupClusterForStorageCapacityScoring(t, "storage-capacity-scoring", 0, 0)
defer config.teardown()

tests := []struct {
@ -90,7 +90,7 @@ func TestVolumeCapacityPriority(t *testing.T) {
wantNodeName string
}{
{
name: "local volumes with close capacity are preferred",
name: "local volumes with max capacity are preferred",
pod: makePod("pod", config.ns, []string{"data"}),
nodes: []*v1.Node{
makeNode(0),
@ -108,10 +108,10 @@ func TestVolumeCapacityPriority(t *testing.T) {
pvcs: []*v1.PersistentVolumeClaim{
setPVCRequestStorage(makePVC("data", config.ns, &waitSSDSC.Name, ""), resource.MustParse("20Gi")),
},
wantNodeName: "node-2",
wantNodeName: "node-0",
},
{
name: "local volumes with close capacity are preferred (multiple pvcs)",
name: "local volumes with max capacity are preferred (multiple pvcs)",
pod: makePod("pod", config.ns, []string{"data-0", "data-1"}),
nodes: []*v1.Node{
makeNode(0),
@ -130,10 +130,10 @@ func TestVolumeCapacityPriority(t *testing.T) {
setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
setPVCRequestStorage(makePVC("data-1", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
},
wantNodeName: "node-1",
wantNodeName: "node-0",
},
{
name: "local volumes with close capacity are preferred (multiple pvcs, multiple classes)",
name: "local volumes with max capacity are preferred (multiple pvcs, multiple classes)",
pod: makePod("pod", config.ns, []string{"data-0", "data-1"}),
nodes: []*v1.Node{
makeNode(0),
@ -152,10 +152,10 @@ func TestVolumeCapacityPriority(t *testing.T) {
setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")),
},
wantNodeName: "node-1",
wantNodeName: "node-0",
},
{
name: "zonal volumes with close capacity are preferred (multiple pvcs, multiple classes)",
name: "zonal volumes with max capacity are preferred (multiple pvcs, multiple classes)",
pod: makePod("pod", config.ns, []string{"data-0", "data-1"}),
nodes: []*v1.Node{
mergeNodeLabels(makeNode(0), map[string]string{
@ -201,7 +201,7 @@ func TestVolumeCapacityPriority(t *testing.T) {
setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")),
setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")),
},
wantNodeName: "node-1",
wantNodeName: "node-0",
},
}