Merge pull request #77595 from bertinatto/volume_limits
Volume Scheduling Limits
This commit is contained in: commit 22fb6fd174
api/openapi-spec/swagger.json (generated, 15 changes)
@@ -16102,6 +16102,10 @@
        "io.k8s.api.storage.v1beta1.CSINodeDriver": {
            "description": "CSINodeDriver holds information about the specification of one CSI driver installed on a node",
            "properties": {
                "allocatable": {
                    "$ref": "#/definitions/io.k8s.api.storage.v1beta1.VolumeNodeResources",
                    "description": "allocatable represents the volume resources of a node that are available for scheduling."
                },
                "name": {
                    "description": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.",
                    "type": "string"

@@ -16427,6 +16431,17 @@
            },
            "type": "object"
        },
        "io.k8s.api.storage.v1beta1.VolumeNodeResources": {
            "description": "VolumeNodeResources is a set of resource limits for scheduling of volumes.",
            "properties": {
                "count": {
                    "description": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.",
                    "format": "int32",
                    "type": "integer"
                }
            },
            "type": "object"
        },
        "io.k8s.apiextensions-apiserver.pkg.apis.apiextensions.v1beta1.CustomResourceColumnDefinition": {
            "description": "CustomResourceColumnDefinition specifies a column for server side printing.",
            "properties": {
@@ -172,6 +172,7 @@ func Run(cc schedulerserverconfig.CompletedConfig, stopCh <-chan struct{}) error
    cc.InformerFactory.Core().V1().Services(),
    cc.InformerFactory.Policy().V1beta1().PodDisruptionBudgets(),
    cc.InformerFactory.Storage().V1().StorageClasses(),
    cc.InformerFactory.Storage().V1beta1().CSINodes(),
    cc.Recorder,
    cc.ComponentConfig.AlgorithmSource,
    stopCh,
@@ -355,6 +355,20 @@ type CSINodeDriver struct {
    // This can be empty if driver does not support topology.
    // +optional
    TopologyKeys []string

    // allocatable represents the volume resources of a node that are available for scheduling.
    // +optional
    Allocatable *VolumeNodeResources
}

// VolumeNodeResources is a set of resource limits for scheduling of volumes.
type VolumeNodeResources struct {
    // Maximum number of unique volumes managed by the CSI driver that can be used on a node.
    // A volume that is both attached and mounted on a node is considered to be used once, not twice.
    // The same rule applies for a unique volume that is shared among multiple pods on the same node.
    // If this field is nil, then the supported number of volumes on this node is unbounded.
    // +optional
    Count *int32
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
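For a concrete picture of the new surface, the following is a minimal sketch (not part of this PR) of constructing a v1beta1 CSINode whose driver advertises a per-node volume limit through the new field. The driver name and count are hypothetical examples; a nil Allocatable or nil Count means the limit is unbounded.

package main

import (
    "fmt"

    storagev1beta1 "k8s.io/api/storage/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    count := int32(39) // hypothetical per-node attach limit for this driver

    csiNode := storagev1beta1.CSINode{
        ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
        Spec: storagev1beta1.CSINodeSpec{
            Drivers: []storagev1beta1.CSINodeDriver{
                {
                    Name:        "example.csi.vendor.io", // hypothetical driver name
                    NodeID:      "node-1",
                    Allocatable: &storagev1beta1.VolumeNodeResources{Count: &count},
                },
            },
        },
    }

    fmt.Println(*csiNode.Spec.Drivers[0].Allocatable.Count) // 39
}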
pkg/apis/storage/v1beta1/zz_generated.conversion.go (generated, 32 changes)
@@ -189,6 +189,16 @@ func RegisterConversions(s *runtime.Scheme) error {
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1beta1.VolumeNodeResources)(nil), (*storage.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(a.(*v1beta1.VolumeNodeResources), b.(*storage.VolumeNodeResources), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*storage.VolumeNodeResources)(nil), (*v1beta1.VolumeNodeResources)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(a.(*storage.VolumeNodeResources), b.(*v1beta1.VolumeNodeResources), scope)
    }); err != nil {
        return err
    }
    return nil
}

@@ -292,6 +302,7 @@ func autoConvert_v1beta1_CSINodeDriver_To_storage_CSINodeDriver(in *v1beta1.CSIN
    out.Name = in.Name
    out.NodeID = in.NodeID
    out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
    out.Allocatable = (*storage.VolumeNodeResources)(unsafe.Pointer(in.Allocatable))
    return nil
}

@@ -304,6 +315,7 @@ func autoConvert_storage_CSINodeDriver_To_v1beta1_CSINodeDriver(in *storage.CSIN
    out.Name = in.Name
    out.NodeID = in.NodeID
    out.TopologyKeys = *(*[]string)(unsafe.Pointer(&in.TopologyKeys))
    out.Allocatable = (*v1beta1.VolumeNodeResources)(unsafe.Pointer(in.Allocatable))
    return nil
}

@@ -597,3 +609,23 @@ func autoConvert_storage_VolumeError_To_v1beta1_VolumeError(in *storage.VolumeEr
func Convert_storage_VolumeError_To_v1beta1_VolumeError(in *storage.VolumeError, out *v1beta1.VolumeError, s conversion.Scope) error {
    return autoConvert_storage_VolumeError_To_v1beta1_VolumeError(in, out, s)
}

func autoConvert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(in *v1beta1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error {
    out.Count = (*int32)(unsafe.Pointer(in.Count))
    return nil
}

// Convert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources is an autogenerated conversion function.
func Convert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(in *v1beta1.VolumeNodeResources, out *storage.VolumeNodeResources, s conversion.Scope) error {
    return autoConvert_v1beta1_VolumeNodeResources_To_storage_VolumeNodeResources(in, out, s)
}

func autoConvert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(in *storage.VolumeNodeResources, out *v1beta1.VolumeNodeResources, s conversion.Scope) error {
    out.Count = (*int32)(unsafe.Pointer(in.Count))
    return nil
}

// Convert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources is an autogenerated conversion function.
func Convert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(in *storage.VolumeNodeResources, out *v1beta1.VolumeNodeResources, s conversion.Scope) error {
    return autoConvert_storage_VolumeNodeResources_To_v1beta1_VolumeNodeResources(in, out, s)
}
@@ -36,6 +36,7 @@ go_test(
    "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
    "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
@@ -351,12 +351,25 @@ func validateCSINodeDriverNodeID(nodeID string, fldPath *field.Path) field.Error
    return allErrs
}

// validateCSINodeDriverAllocatable tests if Allocatable in CSINodeDriver has valid volume limits.
func validateCSINodeDriverAllocatable(a *storage.VolumeNodeResources, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    if a == nil || a.Count == nil {
        return allErrs
    }

    allErrs = append(allErrs, apivalidation.ValidateNonnegativeField(int64(*a.Count), fldPath.Child("count"))...)
    return allErrs
}

// validateCSINodeDriver tests if CSINodeDriver has valid entries
func validateCSINodeDriver(driver storage.CSINodeDriver, driverNamesInSpecs sets.String, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

    allErrs = append(allErrs, apivalidation.ValidateCSIDriverName(driver.Name, fldPath.Child("name"))...)
    allErrs = append(allErrs, validateCSINodeDriverNodeID(driver.NodeID, fldPath.Child("nodeID"))...)
    allErrs = append(allErrs, validateCSINodeDriverAllocatable(driver.Allocatable, fldPath.Child("allocatable"))...)

    // check for duplicate entries for the same driver in specs
    if driverNamesInSpecs.Has(driver.Name) {
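As a quick illustration of the rule above, here is a sketch of a test one could write against the helper this diff introduces (not itself part of the PR): a negative count yields a field error on allocatable.count, while a nil Allocatable passes because a missing limit simply means unbounded.

func TestValidateCSINodeDriverAllocatableSketch(t *testing.T) {
    negative := int32(-1)
    bad := &storage.VolumeNodeResources{Count: &negative}

    // Negative limits are rejected with an error on allocatable.count.
    if errs := validateCSINodeDriverAllocatable(bad, field.NewPath("allocatable")); len(errs) == 0 {
        t.Error("expected a field error for a negative allocatable.count")
    }

    // A nil Allocatable (or nil Count) is valid: the limit is unbounded.
    if errs := validateCSINodeDriverAllocatable(nil, field.NewPath("allocatable")); len(errs) != 0 {
        t.Errorf("unexpected errors for nil allocatable: %v", errs)
    }
}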
@@ -28,6 +28,7 @@ import (
    api "k8s.io/kubernetes/pkg/apis/core"
    "k8s.io/kubernetes/pkg/apis/storage"
    "k8s.io/kubernetes/pkg/features"
    utilpointer "k8s.io/utils/pointer"
)

var (
@@ -1152,6 +1153,34 @@ func TestCSINodeValidation(t *testing.T) {
            },
        },
    },
    {
        // Volume limits being zero
        ObjectMeta: metav1.ObjectMeta{Name: "foo11"},
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{
                {
                    Name:         "io.kubernetes.storage.csi.driver",
                    NodeID:       nodeID,
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(0)},
                },
            },
        },
    },
    {
        // Volume limits with positive number
        ObjectMeta: metav1.ObjectMeta{Name: "foo11"},
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{
                {
                    Name:         "io.kubernetes.storage.csi.driver",
                    NodeID:       nodeID,
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(1)},
                },
            },
        },
    },
    {
        // topology key names with -, _, and dot .
        ObjectMeta: metav1.ObjectMeta{Name: "foo8"},

@@ -1368,6 +1397,20 @@ func TestCSINodeValidation(t *testing.T) {
            },
        },
    },
    {
        // Volume limits with negative number
        ObjectMeta: metav1.ObjectMeta{Name: "foo11"},
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{
                {
                    Name:         "io.kubernetes.storage.csi.driver",
                    NodeID:       nodeID,
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(-1)},
                },
            },
        },
    },
    {
        // topology prefix should be lower case
        ObjectMeta: metav1.ObjectMeta{Name: "foo14"},

@@ -1409,6 +1452,7 @@ func TestCSINodeUpdateValidation(t *testing.T) {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(20)},
            },
        },
    },

@@ -1429,6 +1473,7 @@ func TestCSINodeUpdateValidation(t *testing.T) {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(20)},
            },
        },
    },

@@ -1460,11 +1505,13 @@ func TestCSINodeUpdateValidation(t *testing.T) {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(20)},
            },
            {
                Name:         "io.kubernetes.storage.csi.driver-3",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(30)},
            },
        },
    },

@@ -1483,6 +1530,7 @@ func TestCSINodeUpdateValidation(t *testing.T) {
                Name:         "io.kubernetes.storage.csi.new-driver",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(30)},
            },
        },
    },

@@ -1510,6 +1558,7 @@ func TestCSINodeUpdateValidation(t *testing.T) {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(20)},
            },
        },
    },

@@ -1521,13 +1570,90 @@ func TestCSINodeUpdateValidation(t *testing.T) {
        Drivers: []storage.CSINodeDriver{
            {
                Name:   "io.kubernetes.storage.csi.driver-1",
                NodeID: "nodeB",
                NodeID: nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
            },
            {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(20)},
            },
        },
    },
},
{
    // invalid change trying to set a previously unset allocatable
    ObjectMeta: metav1.ObjectMeta{Name: "foo1"},
    Spec: storage.CSINodeSpec{
        Drivers: []storage.CSINodeDriver{
            {
                Name:         "io.kubernetes.storage.csi.driver-1",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(10)},
            },
            {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(20)},
            },
        },
    },
},
{
    // invalid change trying to update allocatable with a different volume limit
    ObjectMeta: metav1.ObjectMeta{Name: "foo1"},
    Spec: storage.CSINodeSpec{
        Drivers: []storage.CSINodeDriver{
            {
                Name:         "io.kubernetes.storage.csi.driver-1",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
            },
            {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(21)},
            },
        },
    },
},
{
    // invalid change trying to update allocatable with an empty volume limit
    ObjectMeta: metav1.ObjectMeta{Name: "foo1"},
    Spec: storage.CSINodeSpec{
        Drivers: []storage.CSINodeDriver{
            {
                Name:         "io.kubernetes.storage.csi.driver-1",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
            },
            {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: nil},
            },
        },
    },
},
{
    // invalid change trying to remove allocatable
    ObjectMeta: metav1.ObjectMeta{Name: "foo1"},
    Spec: storage.CSINodeSpec{
        Drivers: []storage.CSINodeDriver{
            {
                Name:         "io.kubernetes.storage.csi.driver-1",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
            },
            {
                Name:         "io.kubernetes.storage.csi.driver-2",
                NodeID:       nodeID,
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
            },
        },
    },
pkg/apis/storage/zz_generated.deepcopy.go (generated, 26 changes)
@@ -146,6 +146,11 @@ func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) {
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
    if in.Allocatable != nil {
        in, out := &in.Allocatable, &out.Allocatable
        *out = new(VolumeNodeResources)
        (*in).DeepCopyInto(*out)
    }
    return
}

@@ -461,3 +466,24 @@ func (in *VolumeError) DeepCopy() *VolumeError {
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) {
    *out = *in
    if in.Count != nil {
        in, out := &in.Count, &out.Count
        *out = new(int32)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources.
func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources {
    if in == nil {
        return nil
    }
    out := new(VolumeNodeResources)
    in.DeepCopyInto(out)
    return out
}
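The generated deep copy matters because Count is a pointer field: a plain struct assignment would alias the underlying int32, letting a mutation of the copy leak into the original. A sketch of the property the generated code guarantees (test-style snippet inside the storage package, not part of this diff):

n := int32(10)
orig := &storage.VolumeNodeResources{Count: &n}

cp := orig.DeepCopy() // allocates a fresh int32 for cp.Count
*cp.Count = 99

fmt.Println(*orig.Count) // still 10: the copy does not share the pointer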
@@ -12,9 +12,11 @@ go_library(
    "//pkg/api/legacyscheme:go_default_library",
    "//pkg/apis/storage:go_default_library",
    "//pkg/apis/storage/validation:go_default_library",
    "//pkg/features:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
    "//staging/src/k8s.io/apiserver/pkg/storage/names:go_default_library",
    "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    ],
)

@@ -41,8 +43,12 @@ go_test(
    embed = [":go_default_library"],
    deps = [
    "//pkg/apis/storage:go_default_library",
    "//pkg/features:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
    "//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
    "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
    "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
@@ -22,9 +22,11 @@ import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/util/validation/field"
    "k8s.io/apiserver/pkg/storage/names"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/pkg/apis/storage"
    "k8s.io/kubernetes/pkg/apis/storage/validation"
    "k8s.io/kubernetes/pkg/features"
)

// csiNodeStrategy implements behavior for CSINode objects

@@ -41,8 +43,14 @@ func (csiNodeStrategy) NamespaceScoped() bool {
    return false
}

// ResetBeforeCreate clears the Status field which is not allowed to be set by end users on creation.
// PrepareForCreate clears fields that are not allowed to be set on creation.
func (csiNodeStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
    csiNode := obj.(*storage.CSINode)
    if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
        for i := range csiNode.Spec.Drivers {
            csiNode.Spec.Drivers[i].Allocatable = nil
        }
    }
}

func (csiNodeStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {

@@ -62,8 +70,33 @@ func (csiNodeStrategy) AllowCreateOnUpdate() bool {
    return false
}

// PrepareForUpdate sets the Status fields which is not allowed to be set by an end user updating a CSINode
// PrepareForUpdate sets the driver's Allocatable fields that are not allowed to be set by an end user updating a CSINode.
func (csiNodeStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
    newCSINode := obj.(*storage.CSINode)
    oldCSINode := old.(*storage.CSINode)

    inUse := getAllocatablesInUse(oldCSINode)

    if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
        for i := range newCSINode.Spec.Drivers {
            if !inUse[newCSINode.Spec.Drivers[i].Name] {
                newCSINode.Spec.Drivers[i].Allocatable = nil
            }
        }
    }
}

func getAllocatablesInUse(obj *storage.CSINode) map[string]bool {
    inUse := make(map[string]bool)
    if obj == nil {
        return inUse
    }
    for i := range obj.Spec.Drivers {
        if obj.Spec.Drivers[i].Allocatable != nil {
            inUse[obj.Spec.Drivers[i].Name] = true
        }
    }
    return inUse
}

func (csiNodeStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
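Put differently: with the AttachVolumeLimit gate disabled, an update may carry forward an Allocatable the stored object already reported, but cannot introduce one. A sketch of the resulting behavior, using the helpers from this diff and assuming a request context built as in the tests below:

old := getValidCSINode("foo")
old.Spec.Drivers[0].Allocatable = nil // nothing recorded for this driver yet

count := int32(20)
updated := old.DeepCopy()
updated.Spec.Drivers[0].Allocatable = &storage.VolumeNodeResources{Count: &count}

// With the gate off, getAllocatablesInUse(old) does not list the driver,
// so PrepareForUpdate drops the newly introduced value:
Strategy.PrepareForUpdate(ctx, updated, old)
// updated.Spec.Drivers[0].Allocatable == nil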
@@ -17,18 +17,24 @@ limitations under the License.
package csinode

import (
    "reflect"
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/validation/field"
    genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/apis/storage"
    "k8s.io/kubernetes/pkg/features"
    utilpointer "k8s.io/utils/pointer"
)

func getValidCSINode(name string) *storage.CSINode {
    return &storage.CSINode{
func TestPrepareForCreate(t *testing.T) {
    valid := getValidCSINode("foo")
    emptyAllocatable := &storage.CSINode{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
            Name: "foo",
        },
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{

@@ -40,6 +46,171 @@ func getValidCSINode(name string) *storage.CSINode {
            },
        },
    }

    volumeLimitsEnabledCases := []struct {
        name     string
        obj      *storage.CSINode
        expected *storage.CSINode
    }{
        {
            "empty allocatable",
            emptyAllocatable,
            emptyAllocatable,
        },
        {
            "valid allocatable",
            valid,
            valid,
        },
    }

    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
    for _, test := range volumeLimitsEnabledCases {
        t.Run(test.name, func(t *testing.T) {
            testPrepareForCreate(t, test.obj, test.expected)
        })
    }

    volumeLimitsDisabledCases := []struct {
        name     string
        obj      *storage.CSINode
        expected *storage.CSINode
    }{
        {
            "empty allocatable",
            emptyAllocatable,
            emptyAllocatable,
        },
        {
            "drop allocatable",
            valid,
            emptyAllocatable,
        },
    }

    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, false)()
    for _, test := range volumeLimitsDisabledCases {
        t.Run(test.name, func(t *testing.T) {
            testPrepareForCreate(t, test.obj, test.expected)
        })
    }
}

func testPrepareForCreate(t *testing.T, obj, expected *storage.CSINode) {
    ctx := genericapirequest.WithRequestInfo(genericapirequest.NewContext(), &genericapirequest.RequestInfo{
        APIGroup:   "storage.k8s.io",
        APIVersion: "v1beta1",
        Resource:   "csinodes",
    })
    Strategy.PrepareForCreate(ctx, obj)
    if !reflect.DeepEqual(*expected, *obj) {
        t.Errorf("Object mismatch! Expected:\n%#v\ngot:\n%#v", *expected, *obj)
    }
}

func TestPrepareForUpdate(t *testing.T) {
    valid := getValidCSINode("foo")
    differentAllocatable := &storage.CSINode{
        ObjectMeta: metav1.ObjectMeta{
            Name: "foo",
        },
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{
                {
                    Name:         "valid-driver-name",
                    NodeID:       "valid-node",
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(20)},
                },
            },
        },
    }
    emptyAllocatable := &storage.CSINode{
        ObjectMeta: metav1.ObjectMeta{
            Name: "foo",
        },
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{
                {
                    Name:         "valid-driver-name",
                    NodeID:       "valid-node",
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                },
            },
        },
    }

    volumeLimitsEnabledCases := []struct {
        name     string
        old      *storage.CSINode
        new      *storage.CSINode
        expected *storage.CSINode
    }{
        {
            "allow empty allocatable when it's not set",
            emptyAllocatable,
            emptyAllocatable,
            emptyAllocatable,
        },
        {
            "allow valid allocatable when it's already set",
            valid,
            differentAllocatable,
            differentAllocatable,
        },
        {
            "allow valid allocatable when it's not set",
            emptyAllocatable,
            valid,
            valid,
        },
    }

    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
    for _, test := range volumeLimitsEnabledCases {
        t.Run(test.name, func(t *testing.T) {
            testPrepareForUpdate(t, test.new, test.old, test.expected)
        })
    }

    volumeLimitsDisabledCases := []struct {
        name     string
        old      *storage.CSINode
        new      *storage.CSINode
        expected *storage.CSINode
    }{
        {
            "allow empty allocatable when it's not set",
            emptyAllocatable,
            emptyAllocatable,
            emptyAllocatable,
        },
        {
            "drop allocatable when it's not set",
            emptyAllocatable,
            valid,
            emptyAllocatable,
        },
    }

    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, false)()
    for _, test := range volumeLimitsDisabledCases {
        t.Run(test.name, func(t *testing.T) {
            testPrepareForUpdate(t, test.new, test.old, test.expected)
        })
    }
}

func testPrepareForUpdate(t *testing.T, obj, old, expected *storage.CSINode) {
    ctx := genericapirequest.WithRequestInfo(genericapirequest.NewContext(), &genericapirequest.RequestInfo{
        APIGroup:   "storage.k8s.io",
        APIVersion: "v1beta1",
        Resource:   "csinodes",
    })
    Strategy.PrepareForUpdate(ctx, obj, old)
    if !reflect.DeepEqual(*expected, *obj) {
        t.Errorf("Object mismatch! Expected:\n%#v\ngot:\n%#v", *expected, *obj)
    }
}

func TestCSINodeStrategy(t *testing.T) {
@@ -87,6 +258,43 @@ func TestCSINodeValidation(t *testing.T) {
        getValidCSINode("foo"),
        false,
    },
    {
        "valid csinode with empty allocatable",
        &storage.CSINode{
            ObjectMeta: metav1.ObjectMeta{
                Name: "foo",
            },
            Spec: storage.CSINodeSpec{
                Drivers: []storage.CSINodeDriver{
                    {
                        Name:         "valid-driver-name",
                        NodeID:       "valid-node",
                        TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    },
                },
            },
        },
        false,
    },
    {
        "valid csinode with missing volume limits",
        &storage.CSINode{
            ObjectMeta: metav1.ObjectMeta{
                Name: "foo",
            },
            Spec: storage.CSINodeSpec{
                Drivers: []storage.CSINodeDriver{
                    {
                        Name:         "valid-driver-name",
                        NodeID:       "valid-node",
                        TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                        Allocatable:  &storage.VolumeNodeResources{Count: nil},
                    },
                },
            },
        },
        false,
    },
    {
        "invalid driver name",
        &storage.CSINode{

@@ -99,6 +307,7 @@ func TestCSINodeValidation(t *testing.T) {
                Name:         "$csi-driver@",
                NodeID:       "valid-node",
                TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(10)},
            },
        },
    },

@@ -117,6 +326,26 @@ func TestCSINodeValidation(t *testing.T) {
                    Name:         "valid-driver-name",
                    NodeID:       "",
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(10)},
                },
            },
        },
    },
    true,
},
{
    "invalid allocatable with negative volumes limit",
    &storage.CSINode{
        ObjectMeta: metav1.ObjectMeta{
            Name: "foo",
        },
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{
                {
                    Name:         "valid-driver-name",
                    NodeID:       "valid-node",
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(-1)},
                },
            },
        },
    },

@@ -135,6 +364,7 @@ func TestCSINodeValidation(t *testing.T) {
                Name:         "valid-driver-name",
                NodeID:       "valid-node",
                TopologyKeys: []string{"company.com/zone1", ""},
                Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(10)},
            },
        },
    },

@@ -165,3 +395,21 @@ func TestCSINodeValidation(t *testing.T) {
        })
    }
}

func getValidCSINode(name string) *storage.CSINode {
    return &storage.CSINode{
        ObjectMeta: metav1.ObjectMeta{
            Name: name,
        },
        Spec: storage.CSINodeSpec{
            Drivers: []storage.CSINodeDriver{
                {
                    Name:         "valid-driver-name",
                    NodeID:       "valid-node",
                    TopologyKeys: []string{"company.com/zone1", "company.com/zone2"},
                    Allocatable:  &storage.VolumeNodeResources{Count: utilpointer.Int32Ptr(10)},
                },
            },
        },
    }
}
@@ -10,6 +10,7 @@ go_library(
    importpath = "k8s.io/kubernetes/pkg/scheduler",
    visibility = ["//visibility:public"],
    deps = [
    "//pkg/features:go_default_library",
    "//pkg/scheduler/algorithm:go_default_library",
    "//pkg/scheduler/algorithm/predicates:go_default_library",
    "//pkg/scheduler/api:go_default_library",

@@ -23,15 +24,18 @@ go_library(
    "//pkg/scheduler/metrics:go_default_library",
    "//staging/src/k8s.io/api/core/v1:go_default_library",
    "//staging/src/k8s.io/api/storage/v1:go_default_library",
    "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library",
    "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
    "//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
    "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
    "//staging/src/k8s.io/client-go/informers/storage/v1beta1:go_default_library",
    "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
    "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
    "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
@@ -30,6 +30,7 @@ go_library(
    "//pkg/volume/util:go_default_library",
    "//staging/src/k8s.io/api/core/v1:go_default_library",
    "//staging/src/k8s.io/api/storage/v1:go_default_library",
    "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",

@@ -41,6 +42,8 @@ go_library(
    "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
    "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
    "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
    "//staging/src/k8s.io/csi-translation-lib:go_default_library",
    "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
    "//vendor/k8s.io/klog:go_default_library",
    ],
)

@@ -64,12 +67,15 @@ go_test(
    "//pkg/volume/util:go_default_library",
    "//staging/src/k8s.io/api/core/v1:go_default_library",
    "//staging/src/k8s.io/api/storage/v1:go_default_library",
    "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
    "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
    "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
    "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
    "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
    "//vendor/k8s.io/utils/pointer:go_default_library",
    ],
)
@@ -19,9 +19,11 @@ package predicates
import (
    "fmt"

    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    storagev1beta1 "k8s.io/api/storage/v1beta1"
    "k8s.io/apimachinery/pkg/util/rand"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    csilib "k8s.io/csi-translation-lib"
    "k8s.io/klog"
    "k8s.io/kubernetes/pkg/features"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"

@@ -30,9 +32,10 @@ import (

// CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes
type CSIMaxVolumeLimitChecker struct {
    pvInfo PersistentVolumeInfo
    pvcInfo PersistentVolumeClaimInfo
    scInfo StorageClassInfo
    pvInfo   PersistentVolumeInfo
    pvcInfo  PersistentVolumeClaimInfo
    scInfo   StorageClassInfo

    randomVolumeIDPrefix string
}

@@ -50,52 +53,48 @@ func NewCSIMaxVolumeLimitPredicate(

func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
    pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {

    // if feature gate is disable we return
    if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
        return true, nil, nil
    }
    // If a pod doesn't have any volume attached to it, the predicate will always be true.
    // Thus we make a fast path for it, to avoid unnecessary computations in this case.
    // If the new pod doesn't have any volume attached to it, the predicate will always be true
    if len(pod.Spec.Volumes) == 0 {
        return true, nil, nil
    }

    nodeVolumeLimits := nodeInfo.VolumeLimits()

    // if node does not have volume limits this predicate should exit
    if len(nodeVolumeLimits) == 0 {
    if !utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
        return true, nil, nil
    }

    // a map of unique volume name/csi volume handle and volume limit key
    newVolumes := make(map[string]string)
    if err := c.filterAttachableVolumes(pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
    if err := c.filterAttachableVolumes(nodeInfo, pod.Spec.Volumes, pod.Namespace, newVolumes); err != nil {
        return false, nil, err
    }

    // If the pod doesn't have any new CSI volumes, the predicate will always be true
    if len(newVolumes) == 0 {
        return true, nil, nil
    }

    // a map of unique volume name/csi volume handle and volume limit key
    // If the node doesn't have volume limits, the predicate will always be true
    nodeVolumeLimits := nodeInfo.VolumeLimits()
    if len(nodeVolumeLimits) == 0 {
        return true, nil, nil
    }

    attachedVolumes := make(map[string]string)
    for _, existingPod := range nodeInfo.Pods() {
        if err := c.filterAttachableVolumes(existingPod.Spec.Volumes, existingPod.Namespace, attachedVolumes); err != nil {
        if err := c.filterAttachableVolumes(nodeInfo, existingPod.Spec.Volumes, existingPod.Namespace, attachedVolumes); err != nil {
            return false, nil, err
        }
    }

    newVolumeCount := map[string]int{}
    attachedVolumeCount := map[string]int{}

    for volumeName, volumeLimitKey := range attachedVolumes {
        if _, ok := newVolumes[volumeName]; ok {
            delete(newVolumes, volumeName)
    for volumeUniqueName, volumeLimitKey := range attachedVolumes {
        if _, ok := newVolumes[volumeUniqueName]; ok {
            // Don't count single volume used in multiple pods more than once
            delete(newVolumes, volumeUniqueName)
        }
        attachedVolumeCount[volumeLimitKey]++
    }

    newVolumeCount := map[string]int{}
    for _, volumeLimitKey := range newVolumes {
        newVolumeCount[volumeLimitKey]++
    }

@@ -114,7 +113,7 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
}

func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
    volumes []v1.Volume, namespace string, result map[string]string) error {
    nodeInfo *schedulernodeinfo.NodeInfo, volumes []v1.Volume, namespace string, result map[string]string) error {

    for _, vol := range volumes {
        // CSI volumes can only be used as persistent volumes

@@ -130,74 +129,119 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
        pvc, err := c.pvcInfo.GetPersistentVolumeClaimInfo(namespace, pvcName)

        if err != nil {
            klog.V(4).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
            klog.V(5).Infof("Unable to look up PVC info for %s/%s", namespace, pvcName)
            continue
        }

        driverName, volumeHandle := c.getCSIDriver(pvc)
        // if we can't find driver name or volume handle - we don't count this volume.
        csiNode := nodeInfo.CSINode()
        driverName, volumeHandle := c.getCSIDriverInfo(csiNode, pvc)
        if driverName == "" || volumeHandle == "" {
            klog.V(5).Infof("Could not find a CSI driver name or volume handle, not counting volume")
            continue
        }
        volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
        result[volumeHandle] = volumeLimitKey

        volumeUniqueName := fmt.Sprintf("%s/%s", driverName, volumeHandle)
        volumeLimitKey := volumeutil.GetCSIAttachLimitKey(driverName)
        result[volumeUniqueName] = volumeLimitKey
    }
    return nil
}

func (c *CSIMaxVolumeLimitChecker) getCSIDriver(pvc *v1.PersistentVolumeClaim) (string, string) {
// getCSIDriverInfo returns the CSI driver name and volume ID of a given PVC.
// If the PVC is from a migrated in-tree plugin, this function will return
// the information of the CSI driver that the plugin has been migrated to.
func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
    pvName := pvc.Spec.VolumeName
    namespace := pvc.Namespace
    pvcName := pvc.Name

    placeHolderCSIDriver := ""
    placeHolderHandle := ""
    if pvName == "" {
        klog.V(5).Infof("Persistent volume had no name for claim %s/%s", namespace, pvcName)
        return c.getDriverNameFromSC(pvc)
        return c.getCSIDriverInfoFromSC(csiNode, pvc)
    }
    pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)

    pv, err := c.pvInfo.GetPersistentVolumeInfo(pvName)
    if err != nil {
        klog.V(4).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
        klog.V(5).Infof("Unable to look up PV info for PVC %s/%s and PV %s", namespace, pvcName, pvName)
        // If we can't fetch PV associated with PVC, may be it got deleted
        // or PVC was prebound to a PVC that hasn't been created yet.
        // fallback to using StorageClass for volume counting
        return c.getDriverNameFromSC(pvc)
        return c.getCSIDriverInfoFromSC(csiNode, pvc)
    }

    csiSource := pv.Spec.PersistentVolumeSource.CSI
    if csiSource == nil {
        klog.V(5).Infof("Not considering non-CSI volume %s/%s", namespace, pvcName)
        return placeHolderCSIDriver, placeHolderHandle
        // We make a fast path for non-CSI volumes that aren't migratable
        if !csilib.IsPVMigratable(pv) {
            return "", ""
        }

        pluginName, err := csilib.GetInTreePluginNameFromSpec(pv, nil)
        if err != nil {
            klog.V(5).Infof("Unable to look up plugin name from PV spec: %v", err)
            return "", ""
        }

        if !isCSIMigrationOn(csiNode, pluginName) {
            klog.V(5).Infof("CSI Migration of plugin %s is not enabled", pluginName)
            return "", ""
        }

        csiPV, err := csilib.TranslateInTreePVToCSI(pv)
        if err != nil {
            klog.V(5).Infof("Unable to translate in-tree volume to CSI: %v", err)
            return "", ""
        }

        if csiPV.Spec.PersistentVolumeSource.CSI == nil {
            klog.V(5).Infof("Unable to get a valid volume source for translated PV %s", pvName)
            return "", ""
        }

        csiSource = csiPV.Spec.PersistentVolumeSource.CSI
    }

    return csiSource.Driver, csiSource.VolumeHandle
}

func (c *CSIMaxVolumeLimitChecker) getDriverNameFromSC(pvc *v1.PersistentVolumeClaim) (string, string) {
// getCSIDriverInfoFromSC returns the CSI driver name and a random volume ID of a given PVC's StorageClass.
func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
    namespace := pvc.Namespace
    pvcName := pvc.Name
    scName := pvc.Spec.StorageClassName

    placeHolderCSIDriver := ""
    placeHolderHandle := ""
    // If StorageClass is not set or not found, then PVC must be using immediate binding mode
    // and hence it must be bound before scheduling. So it is safe to not count it.
    if scName == nil {
        // if StorageClass is not set or found, then PVC must be using immediate binding mode
        // and hence it must be bound before scheduling. So it is safe to not count it.
        klog.V(5).Infof("pvc %s/%s has no storageClass", namespace, pvcName)
        return placeHolderCSIDriver, placeHolderHandle
        klog.V(5).Infof("PVC %s/%s has no StorageClass", namespace, pvcName)
        return "", ""
    }

    storageClass, err := c.scInfo.GetStorageClassInfo(*scName)
    if err != nil {
        klog.V(5).Infof("no storage %s found for pvc %s/%s", *scName, namespace, pvcName)
        return placeHolderCSIDriver, placeHolderHandle
        klog.V(5).Infof("Could not get StorageClass for PVC %s/%s: %v", namespace, pvcName, err)
        return "", ""
    }

    // We use random prefix to avoid conflict with volume-ids. If PVC is bound in the middle
    // predicate and there is another pod(on same node) that uses same volume then we will overcount
    // We use random prefix to avoid conflict with volume IDs. If PVC is bound during the execution of the
    // predicate and there is another pod on the same node that uses same volume, then we will overcount
    // the volume and consider both volumes as different.
    volumeHandle := fmt.Sprintf("%s-%s/%s", c.randomVolumeIDPrefix, namespace, pvcName)
    return storageClass.Provisioner, volumeHandle

    provisioner := storageClass.Provisioner
    if csilib.IsMigratableIntreePluginByName(provisioner) {
        if !isCSIMigrationOn(csiNode, provisioner) {
            klog.V(5).Infof("CSI Migration of plugin %s is not enabled", provisioner)
            return "", ""
        }

        driverName, err := csilib.GetCSINameFromInTreeName(provisioner)
        if err != nil {
            klog.V(5).Infof("Unable to look up driver name from plugin name: %v", err)
            return "", ""
        }
        return driverName, volumeHandle
    }

    return provisioner, volumeHandle
}
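The hunk header @@ -114,7 +113,7 @@ above elides the predicate's final comparison. As an assumption-labeled sketch (not quoted from this diff), the check amounts to: for every limit key the new pod needs, the volumes already attached plus the volumes the pod would add must stay within the node's advertised limit for that key.

    // Sketch of the elided comparison (an assumption, not taken from this diff):
    for volumeLimitKey, count := range newVolumeCount {
        maxVolumeLimit, ok := nodeVolumeLimits[v1.ResourceName(volumeLimitKey)]
        if ok {
            currentVolumeCount := attachedVolumeCount[volumeLimitKey]
            if currentVolumeCount+count > int(maxVolumeLimit) {
                return false, []PredicateFailureReason{ErrMaxVolumeCountExceeded}, nil
            }
        }
    }
    return true, nil, nil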
@ -19,58 +19,34 @@ package predicates
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
csilibplugins "k8s.io/csi-translation-lib/plugins"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
)
|
||||
|
||||
const (
|
||||
ebsCSIDriverName = csilibplugins.AWSEBSDriverName
|
||||
gceCSIDriverName = csilibplugins.GCEPDDriverName
|
||||
|
||||
hostpathInTreePluginName = "kubernetes.io/hostpath"
|
||||
)
|
||||
|
||||
func TestCSIVolumeCountPredicate(t *testing.T) {
|
||||
// for pods with CSI pvcs
|
||||
oneVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-ebs-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
twoVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "cs-ebs-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-ebs-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runningPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-ebs-3",
|
||||
ClaimName: "csi-ebs.csi.aws.com-3",
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -140,14 +116,95 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "cs-gce-1",
|
||||
ClaimName: "csi-pd.csi.storage.gke.io-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-gce-2",
|
||||
ClaimName: "csi-pd.csi.storage.gke.io-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// In-tree volumes
|
||||
inTreeOneVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-kubernetes.io/aws-ebs-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
inTreeTwoVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-kubernetes.io/aws-ebs-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-kubernetes.io/aws-ebs-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
// pods with matching csi driver names
|
||||
csiEBSOneVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-ebs.csi.aws.com-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
csiEBSTwoVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-ebs.csi.aws.com-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-ebs.csi.aws.com-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
inTreeNonMigratableOneVolPod := &v1.Pod{
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
VolumeSource: v1.VolumeSource{
|
||||
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
|
||||
ClaimName: "csi-kubernetes.io/hostpath-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -156,112 +213,260 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
newPod *v1.Pod
|
||||
existingPods []*v1.Pod
|
||||
filterName string
|
||||
maxVols int
|
||||
driverNames []string
|
||||
fits bool
|
||||
test string
|
||||
newPod *v1.Pod
|
||||
existingPods []*v1.Pod
|
||||
filterName string
|
||||
maxVols int
|
||||
driverNames []string
|
||||
fits bool
|
||||
test string
|
||||
migrationEnabled bool
|
||||
limitSource string
|
||||
expectedFailureReason *PredicateFailureError
|
||||
}{
|
||||
{
|
||||
newPod: oneVolPod,
|
||||
existingPods: []*v1.Pod{runningPod, twoVolPod},
|
||||
newPod: csiEBSOneVolPod,
|
||||
existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
|
||||
filterName: "csi",
|
||||
maxVols: 4,
|
||||
driverNames: []string{"ebs"},
|
||||
driverNames: []string{ebsCSIDriverName},
|
||||
fits: true,
|
||||
test: "fits when node capacity >= new pods CSI volume",
|
||||
test: "fits when node volume limit >= new pods CSI volume",
|
||||
limitSource: "node",
|
||||
},
|
||||
{
|
||||
newPod: oneVolPod,
|
||||
existingPods: []*v1.Pod{runningPod, twoVolPod},
|
||||
newPod: csiEBSOneVolPod,
|
||||
existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
|
||||
filterName: "csi",
|
||||
maxVols: 2,
|
||||
driverNames: []string{"ebs"},
|
||||
driverNames: []string{ebsCSIDriverName},
|
||||
fits: false,
|
||||
test: "doesn't when node capacity <= pods CSI volume",
|
||||
test: "doesn't when node volume limit <= pods CSI volume",
|
||||
limitSource: "node",
|
||||
},
|
||||
{
|
||||
newPod: csiEBSOneVolPod,
|
||||
existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
|
||||
filterName: "csi",
|
||||
maxVols: 2,
|
||||
driverNames: []string{ebsCSIDriverName},
|
||||
fits: true,
|
||||
test: "should when driver does not support volume limits",
|
||||
limitSource: "csinode-with-no-limit",
|
||||
},
|
||||
// should count pending PVCs
|
||||
{
|
||||
newPod: oneVolPod,
|
||||
existingPods: []*v1.Pod{pendingVolumePod, twoVolPod},
|
||||
newPod: csiEBSOneVolPod,
|
||||
existingPods: []*v1.Pod{pendingVolumePod, csiEBSTwoVolPod},
|
||||
filterName: "csi",
|
||||
maxVols: 2,
|
||||
driverNames: []string{"ebs"},
|
||||
driverNames: []string{ebsCSIDriverName},
|
||||
fits: false,
|
||||
test: "count pending PVCs towards capacity <= pods CSI volume",
|
||||
test: "count pending PVCs towards volume limit <= pods CSI volume",
|
||||
limitSource: "node",
|
||||
},
|
||||
// two same pending PVCs should be counted as 1
|
||||
{
|
||||
newPod: oneVolPod,
|
||||
existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, twoVolPod},
|
||||
newPod: csiEBSOneVolPod,
|
||||
existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, csiEBSTwoVolPod},
|
||||
filterName: "csi",
|
||||
maxVols: 3,
|
||||
driverNames: []string{"ebs"},
|
||||
maxVols: 4,
|
||||
driverNames: []string{ebsCSIDriverName},
|
||||
fits: true,
|
||||
test: "count multiple pending pvcs towards capacity >= pods CSI volume",
|
||||
test: "count multiple pending pvcs towards volume limit >= pods CSI volume",
|
||||
limitSource: "node",
|
||||
},
|
||||
// should count PVCs with invalid PV name but valid SC
|
||||
{
|
||||
newPod: oneVolPod,
|
||||
existingPods: []*v1.Pod{missingPVPod, twoVolPod},
|
||||
newPod: csiEBSOneVolPod,
|
||||
existingPods: []*v1.Pod{missingPVPod, csiEBSTwoVolPod},
|
||||
filterName: "csi",
|
||||
maxVols: 2,
|
||||
driverNames: []string{"ebs"},
|
||||
driverNames: []string{ebsCSIDriverName},
|
||||
fits: false,
|
||||
test: "should count PVCs with invalid PV name but valid SC",
|
||||
limitSource: "node",
|
||||
},
|
||||
// don't count a volume which has storageclass missing
|
||||
{
|
||||
newPod: oneVolPod,
|
||||
newPod: csiEBSOneVolPod,
|
||||
existingPods: []*v1.Pod{runningPod, noSCPVCPod},
|
||||
filterName: "csi",
|
||||
maxVols: 2,
|
||||
driverNames: []string{"ebs"},
|
||||
driverNames: []string{ebsCSIDriverName},
fits: true,
test: "don't count pvcs with missing SC towards capacity",
test: "don't count pvcs with missing SC towards volume limit",
limitSource: "node",
},
// don't count multiple volume types
{
newPod: oneVolPod,
existingPods: []*v1.Pod{gceTwoVolPod, twoVolPod},
newPod: csiEBSOneVolPod,
existingPods: []*v1.Pod{gceTwoVolPod, csiEBSTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{"ebs", "gce"},
fits: true,
test: "don't count pvcs with different type towards capacity",
driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
fits: false,
test: "count pvcs with the same type towards volume limit",
limitSource: "node",
},
{
newPod: gceTwoVolPod,
existingPods: []*v1.Pod{twoVolPod, runningPod},
existingPods: []*v1.Pod{csiEBSTwoVolPod, runningPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{"ebs", "gce"},
driverNames: []string{ebsCSIDriverName, gceCSIDriverName},
fits: true,
test: "don't count pvcs with different type towards capacity",
test: "don't count pvcs with different type towards volume limit",
limitSource: "node",
},
// Tests for in-tree volume migration
{
newPod: inTreeOneVolPod,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: false,
migrationEnabled: true,
limitSource: "csinode",
test: "should count in-tree volumes if migration is enabled",
},
{
newPod: pendingVolumePod,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: false,
migrationEnabled: true,
limitSource: "csinode",
test: "should count unbound in-tree volumes if migration is enabled",
},
{
newPod: inTreeOneVolPod,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: true,
migrationEnabled: false,
limitSource: "csinode",
test: "should not count in-tree volume if migration is disabled",
},
{
newPod: inTreeOneVolPod,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: true,
migrationEnabled: true,
limitSource: "csinode-with-no-limit",
test: "should not limit pod if volume used does not report limits",
},
{
newPod: inTreeOneVolPod,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: true,
migrationEnabled: false,
limitSource: "csinode-with-no-limit",
test: "should not limit in-tree pod if migration is disabled",
},
{
newPod: inTreeNonMigratableOneVolPod,
existingPods: []*v1.Pod{csiEBSTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{hostpathInTreePluginName, ebsCSIDriverName},
fits: true,
migrationEnabled: true,
limitSource: "csinode",
test: "should not count non-migratable in-tree volumes",
},
// mixed volumes
{
newPod: inTreeOneVolPod,
existingPods: []*v1.Pod{csiEBSTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: false,
migrationEnabled: true,
limitSource: "csinode",
test: "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
},
{
newPod: csiEBSOneVolPod,
existingPods: []*v1.Pod{inTreeTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: false,
migrationEnabled: true,
limitSource: "csinode",
test: "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
},
{
newPod: csiEBSOneVolPod,
existingPods: []*v1.Pod{csiEBSTwoVolPod, inTreeTwoVolPod},
filterName: "csi",
maxVols: 3,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: true,
migrationEnabled: false,
limitSource: "csinode",
test: "should not count in-tree and count csi volumes if migration is disabled (when scheduling csi volumes)",
},
{
newPod: inTreeOneVolPod,
existingPods: []*v1.Pod{csiEBSTwoVolPod},
filterName: "csi",
maxVols: 2,
driverNames: []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
fits: true,
migrationEnabled: false,
limitSource: "csinode",
test: "should not count in-tree and count csi volumes if migration is disabled (when scheduling in-tree volumes)",
},
}

defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.AttachVolumeLimit, true)()
expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
// running attachable predicate tests with feature gate and limit present on nodes
for _, test := range tests {
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.driverNames...)
pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo(test.filterName, test.driverNames...),
getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))
t.Run(test.test, func(t *testing.T) {
node := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, int64(test.maxVols), test.driverNames...)
if test.migrationEnabled {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, true)()
enableMigrationOnNode(node, csilibplugins.AWSEBSInTreePluginName)
} else {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, false)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigrationAWS, false)()
}

fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
if err != nil {
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
expectedFailureReasons := []PredicateFailureReason{ErrMaxVolumeCountExceeded}
if test.expectedFailureReason != nil {
expectedFailureReasons = []PredicateFailureReason{test.expectedFailureReason}
}

pred := NewCSIMaxVolumeLimitPredicate(getFakeCSIPVInfo(test.filterName, test.driverNames...),
getFakeCSIPVCInfo(test.filterName, "csi-sc", test.driverNames...),
getFakeCSIStorageClassInfo("csi-sc", test.driverNames[0]))

fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
if err != nil {
t.Errorf("Using allocatable [%s]%s: unexpected error: %v", test.filterName, test.test, err)
}
if !fits && !reflect.DeepEqual(expectedFailureReasons, reasons) {
t.Errorf("Using allocatable [%s]%s: unexpected failure reasons: %v, want: %v", test.filterName, test.test, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("Using allocatable [%s]%s: expected %v, got %v", test.filterName, test.test, test.fits, fits)
}
})
}
}
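Note: limitSource in the table above selects where the test helper places the limit ("node" puts it in Node.Status.Allocatable, "csinode" in CSINodeDriver.Allocatable, "csinode-with-no-limit" creates a CSINode driver entry with no limit). The helper further below also accepts "both", which this table does not exercise; a hypothetical extra case for it could look like this sketch (not part of the patch):

{
	newPod:       csiEBSOneVolPod,
	existingPods: []*v1.Pod{csiEBSTwoVolPod},
	filterName:   "csi",
	maxVols:      2,
	driverNames:  []string{ebsCSIDriverName},
	fits:         false,
	limitSource:  "both", // limit present on both the Node and the CSINode object
	test:         "hypothetical: csi volumes still counted when limits exist on both objects",
},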

@ -281,6 +486,28 @@ func getFakeCSIPVInfo(volumeName string, driverNames ...string) FakePersistentVo
},
},
}

switch driver {
case csilibplugins.AWSEBSInTreePluginName:
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: volumeHandle,
},
}
case hostpathInTreePluginName:
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp",
},
}
default:
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: driver,
VolumeHandle: volumeHandle,
},
}
}
pvInfos = append(pvInfos, pv)
}

@ -317,6 +544,22 @@ func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) FakePer
return pvcInfos
}

func enableMigrationOnNode(nodeInfo *schedulernodeinfo.NodeInfo, pluginName string) {
csiNode := nodeInfo.CSINode()
nodeInfoAnnotations := csiNode.GetAnnotations()
if nodeInfoAnnotations == nil {
nodeInfoAnnotations = map[string]string{}
}

newAnnotationSet := sets.NewString()
newAnnotationSet.Insert(pluginName)
nas := strings.Join(newAnnotationSet.List(), ",")
nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas

csiNode.Annotations = nodeInfoAnnotations
nodeInfo.SetCSINode(csiNode)
}
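Note: enableMigrationOnNode works by stamping the migrated-plugins annotation onto the node's CSINode object, which is exactly what isCSIMigrationOn (added in this patch) later reads back. A minimal sketch of the effect, using the constants this patch relies on:

// After enableMigrationOnNode(nodeInfo, csilibplugins.AWSEBSInTreePluginName):
csiNode := nodeInfo.CSINode()
fmt.Println(csiNode.Annotations[v1.MigratedPluginsAnnotationKey])
// expected output: kubernetes.io/aws-ebs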

func getFakeCSIStorageClassInfo(scName, provisionerName string) FakeStorageClassInfo {
return FakeStorageClassInfo{
{

@ -24,6 +24,7 @@ import (
"testing"

"k8s.io/api/core/v1"
"k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@ -31,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/features"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
utilpointer "k8s.io/utils/pointer"
)

func onePVCPod(filterName string) *v1.Pod {
@ -806,7 +808,7 @@ func TestVolumeCountConflicts(t *testing.T) {

// running attachable predicate tests with feature gate and limit present on nodes
for _, test := range tests {
node := getNodeWithPodAndVolumeLimits(test.existingPods, int64(test.maxVols), test.filterName)
node := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), test.filterName)
pred := NewMaxPDVolumeCountPredicate(test.filterName, getFakePVInfo(test.filterName), getFakePVCInfo(test.filterName))
fits, reasons, err := pred(test.newPod, GetPredicateMetadata(test.newPod, nil), node)
if err != nil {
@ -937,18 +939,63 @@ func TestMaxVolumeFuncM4(t *testing.T) {
}
}

func getNodeWithPodAndVolumeLimits(pods []*v1.Pod, limit int64, driverNames ...string) *schedulernodeinfo.NodeInfo {
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) *schedulernodeinfo.NodeInfo {
nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{},
},
addLimitToNode := func() {
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
Status: v1.NodeStatus{
Allocatable: v1.ResourceList{},
},
}
for _, driver := range driverNames {
node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
}
nodeInfo.SetNode(node)
}
for _, driver := range driverNames {
node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)

createCSINode := func() *v1beta1.CSINode {
return &v1beta1.CSINode{
ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
Spec: v1beta1.CSINodeSpec{
Drivers: []v1beta1.CSINodeDriver{},
},
}
}

addLimitToCSINode := func(addLimits bool) {
csiNode := createCSINode()
for _, driver := range driverNames {
driver := v1beta1.CSINodeDriver{
Name: driver,
NodeID: "node-for-max-pd-test-1",
}
if addLimits {
driver.Allocatable = &v1beta1.VolumeNodeResources{
Count: utilpointer.Int32Ptr(int32(limit)),
}
}
csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
}

nodeInfo.SetCSINode(csiNode)
}
switch limitSource {
case "node":
addLimitToNode()
case "csinode":
addLimitToCSINode(true)
case "both":
addLimitToNode()
addLimitToCSINode(true)
case "csinode-with-no-limit":
addLimitToCSINode(false)
case "no-csi-driver":
csiNode := createCSINode()
nodeInfo.SetCSINode(csiNode)
default:
return nodeInfo
}
nodeInfo.SetNode(node)
return nodeInfo
}
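Note: each limitSource value drives a different branch of the rewritten helper above. A usage sketch (the driver name is hypothetical):

// Limit surfaced through CSINodeDriver.Allocatable:
nodeInfo := getNodeWithPodAndVolumeLimits("csinode", []*v1.Pod{}, 3, "ebs.csi.example.com")
// Limit surfaced only through Node.Status.Allocatable (pre-CSINode behavior):
legacyInfo := getNodeWithPodAndVolumeLimits("node", []*v1.Pod{}, 3, "ebs.csi.example.com")
// CSINode present but the driver reports no limit at all:
unlimitedInfo := getNodeWithPodAndVolumeLimits("csinode-with-no-limit", []*v1.Pod{}, 0, "ebs.csi.example.com")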

@ -25,8 +25,9 @@ import (

"k8s.io/klog"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -37,6 +38,7 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
csilibplugins "k8s.io/csi-translation-lib/plugins"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
v1qos "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/features"
@ -315,6 +317,8 @@ type VolumeFilter struct {
// Filter normal volumes
FilterVolume func(vol *v1.Volume) (id string, relevant bool)
FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
// IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver
IsMigrated func(csiNode *storagev1beta1.CSINode) bool
}

// NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
@ -484,6 +488,11 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta PredicateMetadata,
return true, nil, nil
}

// If a plugin has been migrated to a CSI driver, defer to the CSI predicate.
if c.filter.IsMigrated(nodeInfo.CSINode()) {
return true, nil, nil
}

// count unique volumes
existingVolumes := make(map[string]bool)
for _, existingPod := range nodeInfo.Pods() {
@ -538,6 +547,10 @@ var EBSVolumeFilter = VolumeFilter{
}
return "", false
},

IsMigrated: func(csiNode *storagev1beta1.CSINode) bool {
return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName)
},
}

// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes
@ -555,6 +568,10 @@ var GCEPDVolumeFilter = VolumeFilter{
}
return "", false
},

IsMigrated: func(csiNode *storagev1beta1.CSINode) bool {
return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName)
},
}

// AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes
@ -572,6 +589,10 @@ var AzureDiskVolumeFilter = VolumeFilter{
}
return "", false
},

IsMigrated: func(csiNode *storagev1beta1.CSINode) bool {
return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName)
},
}

// CinderVolumeFilter is a VolumeFilter for filtering Cinder Volumes
@ -590,6 +611,10 @@ var CinderVolumeFilter = VolumeFilter{
}
return "", false
},

IsMigrated: func(csiNode *storagev1beta1.CSINode) bool {
return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName)
},
}

// VolumeZoneChecker contains information to check the volume zone for a predicate.

@ -17,8 +17,15 @@ limitations under the License.
package predicates

import (
"k8s.io/api/core/v1"
"strings"

v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
csilibplugins "k8s.io/csi-translation-lib/plugins"
"k8s.io/kubernetes/pkg/features"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

@ -87,3 +94,56 @@ func SetPredicatesOrderingDuringTest(value []string) func() {
predicatesOrdering = origVal
}
}

// isCSIMigrationOn returns a boolean value indicating whether
// the CSI migration has been enabled for a particular storage plugin.
func isCSIMigrationOn(csiNode *storagev1beta1.CSINode, pluginName string) bool {
if csiNode == nil || len(pluginName) == 0 {
return false
}

// In-tree storage to CSI driver migration feature should be enabled,
// along with the plugin-specific one
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) {
return false
}

switch pluginName {
case csilibplugins.AWSEBSInTreePluginName:
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS) {
return false
}
case csilibplugins.GCEPDInTreePluginName:
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE) {
return false
}
case csilibplugins.AzureDiskInTreePluginName:
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) {
return false
}
case csilibplugins.CinderInTreePluginName:
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationOpenStack) {
return false
}
default:
return false
}

// The plugin name should be listed in the CSINode object annotation.
// This indicates that the plugin has been migrated to a CSI driver in the node.
csiNodeAnn := csiNode.GetAnnotations()
if csiNodeAnn == nil {
return false
}

var mpaSet sets.String
mpa := csiNodeAnn[v1.MigratedPluginsAnnotationKey]
if len(mpa) == 0 {
mpaSet = sets.NewString()
} else {
tok := strings.Split(mpa, ",")
mpaSet = sets.NewString(tok...)
}

return mpaSet.Has(pluginName)
}
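Note: the annotation lookup at the end of isCSIMigrationOn can be reproduced standalone. A runnable sketch (the annotation value is hypothetical; the parsing mirrors the function above):

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// A possible value of the "storage.alpha.kubernetes.io/migrated-plugins"
	// annotation on a CSINode object.
	mpa := "kubernetes.io/aws-ebs,kubernetes.io/gce-pd"
	mpaSet := sets.NewString(strings.Split(mpa, ",")...)
	fmt.Println(mpaSet.Has("kubernetes.io/aws-ebs")) // true
	fmt.Println(mpaSet.Has("kubernetes.io/cinder"))  // false
}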

@ -18,15 +18,20 @@ package scheduler

import (
"fmt"
"k8s.io/klog"
"reflect"

"k8s.io/api/core/v1"
"k8s.io/klog"

v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilfeature "k8s.io/apiserver/pkg/util/feature"
coreinformers "k8s.io/client-go/informers/core/v1"
storageinformers "k8s.io/client-go/informers/storage/v1"
storageinformersv1 "k8s.io/client-go/informers/storage/v1"
storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/features"
)

func (sched *Scheduler) onPvAdd(obj interface{}) {
@ -150,6 +155,63 @@ func (sched *Scheduler) deleteNodeFromCache(obj interface{}) {
klog.Errorf("scheduler cache RemoveNode failed: %v", err)
}
}

func (sched *Scheduler) onCSINodeAdd(obj interface{}) {
csiNode, ok := obj.(*storagev1beta1.CSINode)
if !ok {
klog.Errorf("cannot convert to *storagev1beta1.CSINode: %v", obj)
return
}

if err := sched.config.SchedulerCache.AddCSINode(csiNode); err != nil {
klog.Errorf("scheduler cache AddCSINode failed: %v", err)
}

sched.config.SchedulingQueue.MoveAllToActiveQueue()
}

func (sched *Scheduler) onCSINodeUpdate(oldObj, newObj interface{}) {
oldCSINode, ok := oldObj.(*storagev1beta1.CSINode)
if !ok {
klog.Errorf("cannot convert oldObj to *storagev1beta1.CSINode: %v", oldObj)
return
}

newCSINode, ok := newObj.(*storagev1beta1.CSINode)
if !ok {
klog.Errorf("cannot convert newObj to *storagev1beta1.CSINode: %v", newObj)
return
}

if err := sched.config.SchedulerCache.UpdateCSINode(oldCSINode, newCSINode); err != nil {
klog.Errorf("scheduler cache UpdateCSINode failed: %v", err)
}

sched.config.SchedulingQueue.MoveAllToActiveQueue()
}

func (sched *Scheduler) onCSINodeDelete(obj interface{}) {
var csiNode *storagev1beta1.CSINode
switch t := obj.(type) {
case *storagev1beta1.CSINode:
csiNode = t
case cache.DeletedFinalStateUnknown:
var ok bool
csiNode, ok = t.Obj.(*storagev1beta1.CSINode)
if !ok {
klog.Errorf("cannot convert to *storagev1beta1.CSINode: %v", t.Obj)
return
}
default:
klog.Errorf("cannot convert to *storagev1beta1.CSINode: %v", t)
return
}

if err := sched.config.SchedulerCache.RemoveCSINode(csiNode); err != nil {
klog.Errorf("scheduler cache RemoveCSINode failed: %v", err)
}
}

func (sched *Scheduler) addPodToSchedulingQueue(obj interface{}) {
if err := sched.config.SchedulingQueue.Add(obj.(*v1.Pod)); err != nil {
utilruntime.HandleError(fmt.Errorf("unable to queue %T: %v", obj, err))
@ -324,7 +386,8 @@ func AddAllEventHandlers(
pvInformer coreinformers.PersistentVolumeInformer,
pvcInformer coreinformers.PersistentVolumeClaimInformer,
serviceInformer coreinformers.ServiceInformer,
storageClassInformer storageinformers.StorageClassInformer,
storageClassInformer storageinformersv1.StorageClassInformer,
csiNodeInformer storageinformersv1beta1.CSINodeInformer,
) {
// scheduled pod cache
podInformer.Informer().AddEventHandler(
@ -385,6 +448,16 @@
},
)

if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
csiNodeInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: sched.onCSINodeAdd,
UpdateFunc: sched.onCSINodeUpdate,
DeleteFunc: sched.onCSINodeDelete,
},
)
}

// On add and delete of PVs, it will affect equivalence cache items
// related to persistent volume
pvInformer.Informer().AddEventHandler(

@ -38,11 +38,13 @@ go_library(
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/informers/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
"//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/klog:go_default_library",

@ -22,7 +22,7 @@ import (
"fmt"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
@ -36,12 +36,14 @@ import (
appsinformers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
policyinformers "k8s.io/client-go/informers/policy/v1beta1"
storageinformers "k8s.io/client-go/informers/storage/v1"
storageinformersv1 "k8s.io/client-go/informers/storage/v1"
storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
clientset "k8s.io/client-go/kubernetes"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
policylisters "k8s.io/client-go/listers/policy/v1beta1"
storagelisters "k8s.io/client-go/listers/storage/v1"
storagelistersv1 "k8s.io/client-go/listers/storage/v1"
storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/klog"
@ -184,7 +186,9 @@ type configFactory struct {
// a means to list all PodDisruptionBudgets
pdbLister policylisters.PodDisruptionBudgetLister
// a means to list all StorageClasses
storageClassLister storagelisters.StorageClassLister
storageClassLister storagelistersv1.StorageClassLister
// a means to list all CSINodes
csiNodeLister storagelistersv1beta1.CSINodeLister
// framework has a set of plugins and the context used for running them.
framework framework.Framework

@ -236,7 +240,8 @@ type ConfigFactoryArgs struct {
StatefulSetInformer appsinformers.StatefulSetInformer
ServiceInformer coreinformers.ServiceInformer
PdbInformer policyinformers.PodDisruptionBudgetInformer
StorageClassInformer storageinformers.StorageClassInformer
StorageClassInformer storageinformersv1.StorageClassInformer
CSINodeInformer storageinformersv1beta1.CSINodeInformer
HardPodAffinitySymmetricWeight int32
DisablePreemption bool
PercentageOfNodesToScore int32
@ -262,10 +267,16 @@ func NewConfigFactory(args *ConfigFactoryArgs) Configurator {
}

// storageClassInformer is only enabled through VolumeScheduling feature gate
var storageClassLister storagelisters.StorageClassLister
var storageClassLister storagelistersv1.StorageClassLister
if args.StorageClassInformer != nil {
storageClassLister = args.StorageClassInformer.Lister()
}

var csiNodeLister storagelistersv1beta1.CSINodeLister
if args.CSINodeInformer != nil {
csiNodeLister = args.CSINodeInformer.Lister()
}

c := &configFactory{
client: args.Client,
podLister: schedulerCache,
@ -279,6 +290,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) Configurator {
statefulSetLister: args.StatefulSetInformer.Lister(),
pdbLister: args.PdbInformer.Lister(),
storageClassLister: storageClassLister,
csiNodeLister: csiNodeLister,
framework: framework,
schedulerCache: schedulerCache,
StopEverything: stopEverything,

@ -23,7 +23,7 @@ import (
"testing"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
@ -491,6 +491,7 @@ func newConfigFactory(client clientset.Interface, hardPodAffinitySymmetricWeight
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
informerFactory.Storage().V1beta1().CSINodes(),
hardPodAffinitySymmetricWeight,
disablePodPreemption,
schedulerapi.DefaultPercentageOfNodesToScore,

1
pkg/scheduler/internal/cache/BUILD
vendored
@ -15,6 +15,7 @@ go_library(
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",

44
pkg/scheduler/internal/cache/cache.go
vendored
@ -21,7 +21,8 @@ import (
"sync"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
@ -569,6 +570,47 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
return nil
}

func (cache *schedulerCache) AddCSINode(csiNode *storagev1beta1.CSINode) error {
cache.mu.Lock()
defer cache.mu.Unlock()

n, ok := cache.nodes[csiNode.Name]
if !ok {
n = newNodeInfoListItem(schedulernodeinfo.NewNodeInfo())
cache.nodes[csiNode.Name] = n
}
n.info.SetCSINode(csiNode)
cache.moveNodeInfoToHead(csiNode.Name)
return nil
}

func (cache *schedulerCache) UpdateCSINode(oldCSINode, newCSINode *storagev1beta1.CSINode) error {
cache.mu.Lock()
defer cache.mu.Unlock()

n, ok := cache.nodes[newCSINode.Name]
if !ok {
n = newNodeInfoListItem(schedulernodeinfo.NewNodeInfo())
cache.nodes[newCSINode.Name] = n
}
n.info.SetCSINode(newCSINode)
cache.moveNodeInfoToHead(newCSINode.Name)
return nil
}

func (cache *schedulerCache) RemoveCSINode(csiNode *storagev1beta1.CSINode) error {
cache.mu.Lock()
defer cache.mu.Unlock()

n, ok := cache.nodes[csiNode.Name]
if !ok {
return fmt.Errorf("node %v is not found", csiNode.Name)
}
n.info.SetCSINode(nil)
cache.moveNodeInfoToHead(csiNode.Name)
return nil
}

// addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in
// scheduler cache. This function assumes the lock to scheduler cache has been acquired.
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulernodeinfo.NodeInfo) {

1
pkg/scheduler/internal/cache/fake/BUILD
vendored
@ -9,6 +9,7 @@ go_library(
"//pkg/scheduler/algorithm:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],
)

12
pkg/scheduler/internal/cache/fake/fake_cache.go
vendored
@ -17,7 +17,8 @@ limitations under the License.
package fake

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
@ -74,6 +75,15 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
// RemoveNode is a fake method for testing.
func (c *Cache) RemoveNode(node *v1.Node) error { return nil }

// AddCSINode is a fake method for testing.
func (c *Cache) AddCSINode(csiNode *storagev1beta1.CSINode) error { return nil }

// UpdateCSINode is a fake method for testing.
func (c *Cache) UpdateCSINode(oldCSINode, newCSINode *storagev1beta1.CSINode) error { return nil }

// RemoveCSINode is a fake method for testing.
func (c *Cache) RemoveCSINode(csiNode *storagev1beta1.CSINode) error { return nil }

// UpdateNodeInfoSnapshot is a fake method for testing.
func (c *Cache) UpdateNodeInfoSnapshot(nodeSnapshot *internalcache.NodeInfoSnapshot) error {
return nil

12
pkg/scheduler/internal/cache/interface.go
vendored
@ -17,7 +17,8 @@ limitations under the License.
package cache

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@ -100,6 +101,15 @@ type Cache interface {
// on this node.
UpdateNodeInfoSnapshot(nodeSnapshot *NodeInfoSnapshot) error

// AddCSINode adds overall CSI-related information about node.
AddCSINode(csiNode *storagev1beta1.CSINode) error

// UpdateCSINode updates overall CSI-related information about node.
UpdateCSINode(oldCSINode, newCSINode *storagev1beta1.CSINode) error

// RemoveCSINode removes overall CSI-related information about node.
RemoveCSINode(csiNode *storagev1beta1.CSINode) error

// List lists all cached pods (including assumed ones).
List(labels.Selector) ([]*v1.Pod, error)

@ -12,7 +12,9 @@ go_library(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/algorithm/priorities/util:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/klog:go_default_library",

@ -22,12 +22,13 @@ import (
"sync"
"sync/atomic"

"k8s.io/klog"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

var (
@ -46,7 +47,8 @@ type ImageStateSummary struct {
// NodeInfo is node level aggregated information.
type NodeInfo struct {
// Overall node information.
node *v1.Node
node *v1.Node
csiNode *storagev1beta1.CSINode

pods []*v1.Pod
podsWithAffinity []*v1.Pod
@ -285,6 +287,14 @@ func (n *NodeInfo) Node() *v1.Node {
return n.node
}

// CSINode returns overall CSI-related information about this node.
func (n *NodeInfo) CSINode() *storagev1beta1.CSINode {
if n == nil {
return nil
}
return n.csiNode
}

// Pods return all pods scheduled (including assumed to be) on this node.
func (n *NodeInfo) Pods() []*v1.Pod {
if n == nil {
@ -434,6 +444,7 @@ func (n *NodeInfo) SetGeneration(newGeneration int64) {
func (n *NodeInfo) Clone() *NodeInfo {
clone := &NodeInfo{
node: n.node,
csiNode: n.csiNode,
requestedResource: n.requestedResource.Clone(),
nonzeroRequest: n.nonzeroRequest.Clone(),
allocatableResource: n.allocatableResource.Clone(),
@ -471,11 +482,24 @@ func (n *NodeInfo) Clone() *NodeInfo {
// VolumeLimits returns volume limits associated with the node
func (n *NodeInfo) VolumeLimits() map[v1.ResourceName]int64 {
volumeLimits := map[v1.ResourceName]int64{}

for k, v := range n.AllocatableResource().ScalarResources {
if v1helper.IsAttachableVolumeResourceName(k) {
volumeLimits[k] = v
}
}

if n.csiNode != nil {
for i := range n.csiNode.Spec.Drivers {
d := n.csiNode.Spec.Drivers[i]
if d.Allocatable != nil && d.Allocatable.Count != nil {
// TODO: drop GetCSIAttachLimitKey once we don't get values from Node object
k := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(d.Name))
volumeLimits[k] = int64(*d.Allocatable.Count)
}
}
}

return volumeLimits
}
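Note: the ordering in VolumeLimits matters: limits read from the CSINode object are written into the map after the Node.Status.Allocatable values, so for the same attach-limit key the CSINode count wins. A sketch of how this method behaves inside this package (the driver name and count are hypothetical):

count := int32(3)
csiNode := &storagev1beta1.CSINode{
	ObjectMeta: metav1.ObjectMeta{Name: "node-1"},
	Spec: storagev1beta1.CSINodeSpec{
		Drivers: []storagev1beta1.CSINodeDriver{{
			Name:        "ebs.csi.example.com",
			NodeID:      "node-1",
			Allocatable: &storagev1beta1.VolumeNodeResources{Count: &count},
		}},
	},
}
ni := NewNodeInfo()
ni.SetCSINode(csiNode)
limits := ni.VolumeLimits()
// limits holds one entry, keyed by
// v1.ResourceName(volumeutil.GetCSIAttachLimitKey("ebs.csi.example.com")), with value 3.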

@ -646,6 +670,11 @@ func (n *NodeInfo) RemoveNode(node *v1.Node) error {
return nil
}

// SetCSINode sets the overall CSI-related node information.
func (n *NodeInfo) SetCSINode(csiNode *storagev1beta1.CSINode) {
n.csiNode = csiNode
}

// FilterOutPods receives a list of pods and filters out those whose node names
// are equal to the node of this NodeInfo, but are not found in the pods of this NodeInfo.
//

@ -24,14 +24,15 @@ import (

"k8s.io/klog"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
policyinformers "k8s.io/client-go/informers/policy/v1beta1"
storageinformers "k8s.io/client-go/informers/storage/v1"
storageinformersv1 "k8s.io/client-go/informers/storage/v1"
storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/record"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@ -127,7 +128,8 @@ func New(client clientset.Interface,
statefulSetInformer appsinformers.StatefulSetInformer,
serviceInformer coreinformers.ServiceInformer,
pdbInformer policyinformers.PodDisruptionBudgetInformer,
storageClassInformer storageinformers.StorageClassInformer,
storageClassInformer storageinformersv1.StorageClassInformer,
csiNodeInformer storageinformersv1beta1.CSINodeInformer,
recorder record.EventRecorder,
schedulerAlgorithmSource kubeschedulerconfig.SchedulerAlgorithmSource,
stopCh <-chan struct{},
@ -154,6 +156,7 @@ func New(client clientset.Interface,
ServiceInformer: serviceInformer,
PdbInformer: pdbInformer,
StorageClassInformer: storageClassInformer,
CSINodeInformer: csiNodeInformer,
HardPodAffinitySymmetricWeight: options.hardPodAffinitySymmetricWeight,
DisablePreemption: options.disablePreemption,
PercentageOfNodesToScore: options.percentageOfNodesToScore,
@ -201,7 +204,7 @@ func New(client clientset.Interface,
// Create the scheduler.
sched := NewFromConfig(config)

AddAllEventHandlers(sched, options.schedulerName, nodeInformer, podInformer, pvInformer, pvcInformer, serviceInformer, storageClassInformer)
AddAllEventHandlers(sched, options.schedulerName, nodeInformer, podInformer, pvInformer, pvcInformer, serviceInformer, storageClassInformer, csiNodeInformer)
return sched, nil
}

@ -26,7 +26,7 @@ import (
"testing"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -200,6 +200,7 @@ func TestSchedulerCreation(t *testing.T) {
informerFactory.Core().V1().Services(),
informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
informerFactory.Storage().V1().StorageClasses(),
informerFactory.Storage().V1beta1().CSINodes(),
eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "scheduler"}),
kubeschedulerconfig.SchedulerAlgorithmSource{Provider: &testSource},
stopCh,

@ -13,7 +13,6 @@ go_library(
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
@ -62,5 +61,6 @@ go_test(
"//staging/src/k8s.io/client-go/util/testing:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

@ -22,14 +22,14 @@ import (
"encoding/json"
goerrors "errors"
"fmt"
"math"
"strings"

"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1beta1 "k8s.io/api/storage/v1beta1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@ -117,17 +117,13 @@ func (nim *nodeInfoManager) InstallCSIDriver(driverName string, driverNodeID str
nodeUpdateFuncs = append(nodeUpdateFuncs, updateTopologyLabels(topology))
}

if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
nodeUpdateFuncs = append(nodeUpdateFuncs, updateMaxAttachLimit(driverName, maxAttachLimit))
}

err := nim.updateNode(nodeUpdateFuncs...)
if err != nil {
return fmt.Errorf("error updating Node object with CSI driver node info: %v", err)
}

if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) {
err = nim.updateCSINode(driverName, driverNodeID, topology)
err = nim.updateCSINode(driverName, driverNodeID, maxAttachLimit, topology)
if err != nil {
return fmt.Errorf("error updating CSINode object with CSI driver node info: %v", err)
}
@ -354,6 +350,7 @@ func updateTopologyLabels(topology map[string]string) nodeUpdateFunc {
func (nim *nodeInfoManager) updateCSINode(
driverName string,
driverNodeID string,
maxAttachLimit int64,
topology map[string]string) error {

csiKubeClient := nim.volumeHost.GetKubeClient()
@ -363,7 +360,7 @@ func (nim *nodeInfoManager) updateCSINode(

var updateErrs []error
err := wait.ExponentialBackoff(updateBackoff, func() (bool, error) {
if err := nim.tryUpdateCSINode(csiKubeClient, driverName, driverNodeID, topology); err != nil {
if err := nim.tryUpdateCSINode(csiKubeClient, driverName, driverNodeID, maxAttachLimit, topology); err != nil {
updateErrs = append(updateErrs, err)
return false, nil
}
@ -379,6 +376,7 @@ func (nim *nodeInfoManager) tryUpdateCSINode(
csiKubeClient clientset.Interface,
driverName string,
driverNodeID string,
maxAttachLimit int64,
topology map[string]string) error {

nodeInfo, err := csiKubeClient.StorageV1beta1().CSINodes().Get(string(nim.nodeName), metav1.GetOptions{})
@ -389,7 +387,7 @@ func (nim *nodeInfoManager) tryUpdateCSINode(
return err
}

return nim.installDriverToCSINode(nodeInfo, driverName, driverNodeID, topology)
return nim.installDriverToCSINode(nodeInfo, driverName, driverNodeID, maxAttachLimit, topology)
}

func (nim *nodeInfoManager) InitializeCSINodeWithAnnotation() error {
@ -515,6 +513,7 @@ func (nim *nodeInfoManager) installDriverToCSINode(
nodeInfo *storagev1beta1.CSINode,
driverName string,
driverNodeID string,
maxAttachLimit int64,
topology map[string]string) error {

csiKubeClient := nim.volumeHost.GetKubeClient()
@ -555,6 +554,19 @@ func (nim *nodeInfoManager) installDriverToCSINode(
TopologyKeys: topologyKeys.List(),
}

if utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
if maxAttachLimit > 0 {
if maxAttachLimit > math.MaxInt32 {
klog.Warningf("Exceeded max supported attach limit value, truncating it to %d", math.MaxInt32)
maxAttachLimit = math.MaxInt32
}
m := int32(maxAttachLimit)
driverSpec.Allocatable = &storagev1beta1.VolumeNodeResources{Count: &m}
} else {
klog.Errorf("Invalid attach limit value %d cannot be added to CSINode object for %q", maxAttachLimit, driverName)
}
}

newDriverSpecs = append(newDriverSpecs, driverSpec)
nodeInfo.Spec.Drivers = newDriverSpecs
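Note: VolumeNodeResources.Count is an int32 in the API while CSI drivers report the limit as an int64, hence the truncation above. The clamp in isolation (a hypothetical standalone helper, not part of the patch):

package main

import (
	"fmt"
	"math"
)

// clampToInt32 mirrors the truncation performed when storing the attach limit:
// values beyond the int32 range are capped at math.MaxInt32 instead of overflowing.
func clampToInt32(v int64) int32 {
	if v > math.MaxInt32 {
		return math.MaxInt32
	}
	return int32(v)
}

func main() {
	fmt.Println(clampToInt32(10))                // 10
	fmt.Println(clampToInt32(math.MaxInt32 + 1)) // 2147483647
}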
|
||||
|
||||
@ -621,27 +633,6 @@ func (nim *nodeInfoManager) tryUninstallDriverFromCSINode(
|
||||
|
||||
}
|
||||
|
||||
func updateMaxAttachLimit(driverName string, maxLimit int64) nodeUpdateFunc {
|
||||
return func(node *v1.Node) (*v1.Node, bool, error) {
|
||||
if maxLimit <= 0 {
|
||||
klog.V(4).Infof("skipping adding attach limit for %s", driverName)
|
||||
return node, false, nil
|
||||
}
|
||||
|
||||
if node.Status.Capacity == nil {
|
||||
node.Status.Capacity = v1.ResourceList{}
|
||||
}
|
||||
if node.Status.Allocatable == nil {
|
||||
node.Status.Allocatable = v1.ResourceList{}
|
||||
}
|
||||
limitKeyName := util.GetCSIAttachLimitKey(driverName)
|
||||
node.Status.Capacity[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
|
||||
node.Status.Allocatable[v1.ResourceName(limitKeyName)] = *resource.NewQuantity(maxLimit, resource.DecimalSI)
|
||||
|
||||
return node, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func removeMaxAttachLimit(driverName string) nodeUpdateFunc {
|
||||
return func(node *v1.Node) (*v1.Node, bool, error) {
|
||||
limitKey := v1.ResourceName(util.GetCSIAttachLimitKey(driverName))
|
||||
|
@ -19,12 +19,14 @@ package nodeinfomanager
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"math"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
storage "k8s.io/api/storage/v1beta1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
@ -40,6 +42,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
volumetest "k8s.io/kubernetes/pkg/volume/testing"
|
||||
"k8s.io/kubernetes/pkg/volume/util"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
)
|
||||
|
||||
type testcase struct {
|
||||
@ -107,6 +110,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
nodeIDMap{
|
||||
"com.example.csi.driver1": "com.example.csi/csi-node1",
|
||||
},
|
||||
nil, /* volumeLimits */
|
||||
topologyKeyMap{
|
||||
"com.example.csi.driver1": {"com.example.csi/zone"},
|
||||
},
|
||||
@ -130,6 +134,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: []string{"com.example.csi/zone"},
|
||||
Allocatable: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -147,6 +152,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
nodeIDMap{
|
||||
"com.example.csi.driver1": "com.example.csi/csi-node1",
|
||||
},
|
||||
nil, /* volumeLimits */
|
||||
nil, /* topologyKeys */
|
||||
),
|
||||
inputNodeID: "com.example.csi/csi-node1",
|
||||
@ -168,6 +174,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: []string{"com.example.csi/zone"},
|
||||
Allocatable: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -187,6 +194,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
nodeIDMap{
|
||||
"net.example.storage.other-driver": "net.example.storage/test-node",
|
||||
},
|
||||
nil, /* volumeLimits */
|
||||
topologyKeyMap{
|
||||
"net.example.storage.other-driver": {"net.example.storage/rack"},
|
||||
},
|
||||
@ -216,11 +224,13 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "net.example.storage.other-driver",
|
||||
NodeID: "net.example.storage/test-node",
|
||||
TopologyKeys: []string{"net.example.storage/rack"},
|
||||
Allocatable: nil,
|
||||
},
|
||||
{
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: []string{"com.example.csi/zone"},
|
||||
Allocatable: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -240,6 +250,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
nodeIDMap{
|
||||
"com.example.csi.driver1": "com.example.csi/csi-node1",
|
||||
},
|
||||
nil, /* volumeLimits */
|
||||
topologyKeyMap{
|
||||
"com.example.csi.driver1": {"com.example.csi/zone"},
|
||||
},
|
||||
@ -264,6 +275,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
nodeIDMap{
|
||||
"com.example.csi.driver1": "com.example.csi/csi-node1",
|
||||
},
|
||||
nil, /* volumeLimits */
|
||||
topologyKeyMap{
|
||||
"com.example.csi.driver1": {"com.example.csi/zone"},
|
||||
},
|
||||
@ -290,6 +302,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/other-node",
|
||||
TopologyKeys: []string{"com.example.csi/rack"},
|
||||
Allocatable: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -315,6 +328,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: nil,
|
||||
Allocatable: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -334,6 +348,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
nodeIDMap{
|
||||
"com.example.csi.driver1": "com.example.csi/csi-node1",
|
||||
},
|
||||
nil, /* volumeLimits */
|
||||
topologyKeyMap{
|
||||
"com.example.csi.driver1": {"com.example.csi/zone"},
|
||||
},
|
||||
@ -357,6 +372,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: nil,
|
||||
Allocatable: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -376,6 +392,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
nodeIDMap{
|
||||
"net.example.storage.other-driver": "net.example.storage/test-node",
|
||||
},
|
||||
nil, /* volumeLimits */
|
||||
topologyKeyMap{
|
||||
"net.example.storage.other-driver": {"net.example.storage/rack"},
|
||||
},
|
||||
@ -402,11 +419,13 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "net.example.storage.other-driver",
|
||||
NodeID: "net.example.storage/test-node",
|
||||
TopologyKeys: []string{"net.example.storage/rack"},
|
||||
Allocatable: nil,
|
||||
},
|
||||
{
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: nil,
|
||||
Allocatable: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -420,7 +439,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
expectFail: true,
|
||||
},
|
||||
{
|
||||
name: "new node with valid max limit",
|
||||
name: "new node with valid max limit of volumes",
|
||||
driverName: "com.example.csi.driver1",
|
||||
existingNode: generateNode(nil /*nodeIDs*/, nil /*labels*/, nil /*capacity*/),
|
||||
inputVolumeLimit: 10,
|
||||
@ -431,15 +450,94 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "node1",
|
||||
Annotations: map[string]string{annotationKeyNodeID: marshall(nodeIDMap{"com.example.csi.driver1": "com.example.csi/csi-node1"})},
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi.driver1")): *resource.NewQuantity(10, resource.DecimalSI),
|
||||
},
|
||||
expectedCSINode: &storage.CSINode{
|
||||
ObjectMeta: getCSINodeObjectMeta(),
|
||||
Spec: storage.CSINodeSpec{
|
||||
Drivers: []storage.CSINodeDriver{
|
||||
{
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: nil,
|
||||
Allocatable: &storage.VolumeNodeResources{
|
||||
Count: utilpointer.Int32Ptr(10),
|
||||
},
|
||||
},
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi.driver1")): *resource.NewQuantity(10, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new node with max limit of volumes",
|
||||
driverName: "com.example.csi.driver1",
|
||||
existingNode: generateNode(nil /*nodeIDs*/, nil /*labels*/, nil /*capacity*/),
|
||||
inputVolumeLimit: math.MaxInt32,
|
||||
inputTopology: nil,
|
||||
inputNodeID: "com.example.csi/csi-node1",
|
||||
expectedNode: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
Annotations: map[string]string{annotationKeyNodeID: marshall(nodeIDMap{"com.example.csi.driver1": "com.example.csi/csi-node1"})},
|
||||
},
|
||||
},
|
||||
expectedCSINode: &storage.CSINode{
|
||||
ObjectMeta: getCSINodeObjectMeta(),
|
||||
Spec: storage.CSINodeSpec{
|
||||
Drivers: []storage.CSINodeDriver{
|
||||
{
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: nil,
|
||||
Allocatable: &storage.VolumeNodeResources{
|
||||
Count: utilpointer.Int32Ptr(math.MaxInt32),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new node with overflown max limit of volumes",
|
||||
driverName: "com.example.csi.driver1",
|
||||
existingNode: generateNode(nil /*nodeIDs*/, nil /*labels*/, nil /*capacity*/),
|
||||
inputVolumeLimit: math.MaxInt32 + 1,
|
||||
inputTopology: nil,
|
||||
inputNodeID: "com.example.csi/csi-node1",
|
||||
expectedNode: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
Annotations: map[string]string{annotationKeyNodeID: marshall(nodeIDMap{"com.example.csi.driver1": "com.example.csi/csi-node1"})},
|
||||
},
|
||||
},
|
||||
expectedCSINode: &storage.CSINode{
|
||||
ObjectMeta: getCSINodeObjectMeta(),
|
||||
Spec: storage.CSINodeSpec{
|
||||
Drivers: []storage.CSINodeDriver{
|
||||
{
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: nil,
|
||||
Allocatable: &storage.VolumeNodeResources{
|
||||
Count: utilpointer.Int32Ptr(math.MaxInt32),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new node without max limit of volumes",
|
||||
driverName: "com.example.csi.driver1",
|
||||
existingNode: generateNode(nil /*nodeIDs*/, nil /*labels*/, nil /*capacity*/),
|
||||
inputVolumeLimit: 0,
|
||||
inputTopology: nil,
|
||||
inputNodeID: "com.example.csi/csi-node1",
|
||||
expectedNode: &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "node1",
|
||||
Annotations: map[string]string{annotationKeyNodeID: marshall(nodeIDMap{"com.example.csi.driver1": "com.example.csi/csi-node1"})},
|
||||
},
|
||||
},
|
||||
expectedCSINode: &storage.CSINode{
|
||||
ObjectMeta: getCSINodeObjectMeta(),
|
||||
Spec: storage.CSINodeSpec{
|
||||
@ -454,15 +552,23 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "node with existing valid max limit",
|
||||
name: "node with existing valid max limit of volumes",
|
||||
driverName: "com.example.csi.driver1",
|
||||
existingNode: generateNode(
|
||||
nil, /*nodeIDs*/
|
||||
nil, /*labels*/
|
||||
map[v1.ResourceName]resource.Quantity{
|
||||
v1.ResourceCPU: *resource.NewScaledQuantity(4, -3),
|
||||
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1")): *resource.NewQuantity(10, resource.DecimalSI),
|
||||
}),
|
||||
|
||||
existingCSINode: generateCSINode(
|
||||
nodeIDMap{
|
||||
"com.example.csi.driver1": "com.example.csi/csi-node1",
|
||||
},
|
||||
generateVolumeLimits(10),
|
||||
nil, /* topologyKeys */
|
||||
),
|
||||
|
||||
inputVolumeLimit: 20,
|
||||
inputTopology: nil,
|
||||
inputNodeID: "com.example.csi/csi-node1",
|
||||
@ -473,14 +579,10 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi.driver1")): *resource.NewQuantity(20, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewScaledQuantity(4, -3),
|
||||
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1")): *resource.NewQuantity(10, resource.DecimalSI),
|
||||
},
|
||||
Allocatable: v1.ResourceList{
|
||||
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi.driver1")): *resource.NewQuantity(20, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewScaledQuantity(4, -3),
|
||||
v1.ResourceName(util.GetCSIAttachLimitKey("com.example.csi/driver1")): *resource.NewQuantity(10, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -492,6 +594,7 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
Name: "com.example.csi.driver1",
|
||||
NodeID: "com.example.csi/csi-node1",
|
||||
TopologyKeys: nil,
|
||||
Allocatable: generateVolumeLimits(10),
|
||||
},
|
||||
},
|
||||
},
|
||||
@ -502,6 +605,12 @@ func TestInstallCSIDriver(t *testing.T) {
|
||||
test(t, true /* addNodeInfo */, true /* csiNodeInfoEnabled */, testcases)
|
||||
}
|
||||
|
||||
func generateVolumeLimits(i int32) *storage.VolumeNodeResources {
|
||||
return &storage.VolumeNodeResources{
|
||||
Count: utilpointer.Int32Ptr(i),
|
||||
}
|
||||
}
|
||||

// TestInstallCSIDriver_CSINodeInfoDisabled tests InstallCSIDriver with various existing Node annotations
// and CSINodeInfo feature gate disabled.
func TestInstallCSIDriverCSINodeInfoDisabled(t *testing.T) {
@ -589,6 +698,7 @@ func TestUninstallCSIDriver(t *testing.T) {
			nodeIDMap{
				"com.example.csi.driver1": "com.example.csi/csi-node1",
			},
			nil, /* volumeLimits */
			topologyKeyMap{
				"com.example.csi.driver1": {"com.example.csi/zone"},
			},
@ -619,6 +729,7 @@ func TestUninstallCSIDriver(t *testing.T) {
			nodeIDMap{
				"net.example.storage.other-driver": "net.example.storage/csi-node1",
			},
			nil, /* volumeLimits */
			topologyKeyMap{
				"net.example.storage.other-driver": {"net.example.storage/zone"},
			},
@ -1116,12 +1227,13 @@ func marshall(nodeIDs nodeIDMap) string {
	return string(b)
}

func generateCSINode(nodeIDs nodeIDMap, topologyKeys topologyKeyMap) *storage.CSINode {
func generateCSINode(nodeIDs nodeIDMap, volumeLimits *storage.VolumeNodeResources, topologyKeys topologyKeyMap) *storage.CSINode {
	nodeDrivers := []storage.CSINodeDriver{}
	for k, nodeID := range nodeIDs {
		dspec := storage.CSINodeDriver{
			Name:   k,
			NodeID: nodeID,
			Name:        k,
			NodeID:      nodeID,
			Allocatable: volumeLimits,
		}
		if top, exists := topologyKeys[k]; exists {
			dspec.TopologyKeys = top

@ -414,34 +414,6 @@ func ClusterRoles() []rbacv1.ClusterRole {
				rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts/token").RuleOrDie(),
			},
		},
		{
			// a role to use for the kube-scheduler
			ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"},
			Rules: []rbacv1.PolicyRule{
				eventsRule(),

				// this is for leaderlease access
				// TODO: scope this to the kube-system namespace
				rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
				rbacv1helpers.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),

				// fundamental resources
				rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
				rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
				rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
				rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
				// things that select pods
				rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
				rbacv1helpers.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
				rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
				// things that pods use or applies to them
				rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
				rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
				// Needed to check API access. These creates are non-mutating
				rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
				rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
			},
		},
		{
			// a role to use for the kube-dns pod
			ObjectMeta: metav1.ObjectMeta{Name: "system:kube-dns"},
@ -498,6 +470,39 @@ func ClusterRoles() []rbacv1.ClusterRole {
		},
	}

	kubeSchedulerRules := []rbacv1.PolicyRule{
		eventsRule(),
		// This is for leaderlease access
		// TODO: scope this to the kube-system namespace
		rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
		rbacv1helpers.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),

		// Fundamental resources
		rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
		rbacv1helpers.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
		rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
		rbacv1helpers.NewRule("patch", "update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
		// Things that select pods
		rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
		rbacv1helpers.NewRule(Read...).Groups(appsGroup, extensionsGroup).Resources("replicasets").RuleOrDie(),
		rbacv1helpers.NewRule(Read...).Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
		// Things that pods use or applies to them
		rbacv1helpers.NewRule(Read...).Groups(policyGroup).Resources("poddisruptionbudgets").RuleOrDie(),
		rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
		// Needed to check API access. These creates are non-mutating
		rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
		rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
	}
	if utilfeature.DefaultFeatureGate.Enabled(features.CSINodeInfo) &&
		utilfeature.DefaultFeatureGate.Enabled(features.AttachVolumeLimit) {
		kubeSchedulerRules = append(kubeSchedulerRules, rbacv1helpers.NewRule(Read...).Groups(storageGroup).Resources("csinodes").RuleOrDie())
	}
	roles = append(roles, rbacv1.ClusterRole{
		// a role to use for the kube-scheduler
		ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"},
		Rules:      kubeSchedulerRules,
	})
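
Read is this package's get/list/watch verb set, so when both the CSINodeInfo and AttachVolumeLimit gates are on, the appended rule expands to the PolicyRule below (shown for reference; field layout from rbac/v1):

	rbacv1.PolicyRule{
		Verbs:     []string{"get", "list", "watch"}, // the bootstrap policy's Read verbs
		APIGroups: []string{"storage.k8s.io"},
		Resources: []string{"csinodes"},
	}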

	externalProvisionerRules := []rbacv1.PolicyRule{
		rbacv1helpers.NewRule("create", "delete", "get", "list", "watch").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
		rbacv1helpers.NewRule("get", "list", "watch", "update", "patch").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),

@ -802,6 +802,14 @@ items:
      - subjectaccessreviews
      verbs:
      - create
    - apiGroups:
      - storage.k8s.io
      resources:
      - csinodes
      verbs:
      - get
      - list
      - watch
- apiVersion: rbac.authorization.k8s.io/v1
  kind: ClusterRole
  metadata:
386
staging/src/k8s.io/api/storage/v1beta1/generated.pb.go
generated
@ -39,6 +39,7 @@ limitations under the License.
		VolumeAttachmentSpec
		VolumeAttachmentStatus
		VolumeError
		VolumeNodeResources
*/
package v1beta1

@ -126,6 +127,10 @@ func (m *VolumeError) Reset() { *m = VolumeError{} }
func (*VolumeError) ProtoMessage() {}
func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }

func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} }
func (*VolumeNodeResources) ProtoMessage() {}
func (*VolumeNodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }

func init() {
	proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver")
	proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1beta1.CSIDriverList")
@ -142,6 +147,7 @@ func init() {
	proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec")
	proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus")
	proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError")
	proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources")
}
func (m *CSIDriver) Marshal() (dAtA []byte, err error) {
	size := m.Size()
@ -325,6 +331,16 @@ func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) {
			i += copy(dAtA[i:], s)
		}
	}
	if m.Allocatable != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintGenerated(dAtA, i, uint64(m.Allocatable.Size()))
		n6, err := m.Allocatable.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n6
	}
	return i, nil
}

@ -346,11 +362,11 @@ func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) {
	dAtA[i] = 0xa
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
	n6, err := m.ListMeta.MarshalTo(dAtA[i:])
	n7, err := m.ListMeta.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n6
	i += n7
	if len(m.Items) > 0 {
		for _, msg := range m.Items {
			dAtA[i] = 0x12
@ -414,11 +430,11 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) {
	dAtA[i] = 0xa
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
	n7, err := m.ObjectMeta.MarshalTo(dAtA[i:])
	n8, err := m.ObjectMeta.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n7
	i += n8
	dAtA[i] = 0x12
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner)))
@ -515,11 +531,11 @@ func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) {
	dAtA[i] = 0xa
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
	n8, err := m.ListMeta.MarshalTo(dAtA[i:])
	n9, err := m.ListMeta.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n8
	i += n9
	if len(m.Items) > 0 {
		for _, msg := range m.Items {
			dAtA[i] = 0x12
@ -553,27 +569,27 @@ func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) {
	dAtA[i] = 0xa
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size()))
	n9, err := m.ObjectMeta.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n9
	dAtA[i] = 0x12
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
	n10, err := m.Spec.MarshalTo(dAtA[i:])
	n10, err := m.ObjectMeta.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n10
	dAtA[i] = 0x1a
	dAtA[i] = 0x12
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
	n11, err := m.Status.MarshalTo(dAtA[i:])
	i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size()))
	n11, err := m.Spec.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n11
	dAtA[i] = 0x1a
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size()))
	n12, err := m.Status.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n12
	return i, nil
}

@ -595,11 +611,11 @@ func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) {
	dAtA[i] = 0xa
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size()))
	n12, err := m.ListMeta.MarshalTo(dAtA[i:])
	n13, err := m.ListMeta.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n12
	i += n13
	if len(m.Items) > 0 {
		for _, msg := range m.Items {
			dAtA[i] = 0x12
@ -640,11 +656,11 @@ func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) {
		dAtA[i] = 0x12
		i++
		i = encodeVarintGenerated(dAtA, i, uint64(m.InlineVolumeSpec.Size()))
		n13, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:])
		n14, err := m.InlineVolumeSpec.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n13
		i += n14
	}
	return i, nil
}
@ -671,11 +687,11 @@ func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) {
	dAtA[i] = 0x12
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size()))
	n14, err := m.Source.MarshalTo(dAtA[i:])
	n15, err := m.Source.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n14
	i += n15
	dAtA[i] = 0x1a
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName)))
@ -732,21 +748,21 @@ func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) {
		dAtA[i] = 0x1a
		i++
		i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size()))
		n15, err := m.AttachError.MarshalTo(dAtA[i:])
		n16, err := m.AttachError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n15
		i += n16
	}
	if m.DetachError != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size()))
		n16, err := m.DetachError.MarshalTo(dAtA[i:])
		n17, err := m.DetachError.MarshalTo(dAtA[i:])
		if err != nil {
			return 0, err
		}
		i += n16
		i += n17
	}
	return i, nil
}
@ -769,11 +785,11 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) {
	dAtA[i] = 0xa
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size()))
	n17, err := m.Time.MarshalTo(dAtA[i:])
	n18, err := m.Time.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	i += n17
	i += n18
	dAtA[i] = 0x12
	i++
	i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message)))
@ -781,6 +797,29 @@ func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) {
	size := m.Size()
	dAtA = make([]byte, size)
	n, err := m.MarshalTo(dAtA)
	if err != nil {
		return nil, err
	}
	return dAtA[:n], nil
}

func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) {
	var i int
	_ = i
	var l int
	_ = l
	if m.Count != nil {
		dAtA[i] = 0x8
		i++
		i = encodeVarintGenerated(dAtA, i, uint64(*m.Count))
	}
	return i, nil
}
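
The hard-coded byte 0x8 above is the protobuf tag for Count: tag = (field number << 3) | wire type, with Count being field 1 on wire type 0 (varint). A tiny self-contained check of that arithmetic (illustrative only, not part of the generated file):

	package main

	import "fmt"

	func main() {
		// Count: field 1, wire type 0 (varint) -> tag byte 0x8.
		// Allocatable in CSINodeDriver: field 4, wire type 2 (length-delimited) -> 0x22.
		fmt.Printf("%#x %#x\n", 1<<3|0, 4<<3|2) // prints 0x8 0x22
	}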

func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
@ -849,6 +888,10 @@ func (m *CSINodeDriver) Size() (n int) {
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if m.Allocatable != nil {
		l = m.Allocatable.Size()
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}

@ -1018,6 +1061,15 @@ func (m *VolumeError) Size() (n int) {
	return n
}

func (m *VolumeNodeResources) Size() (n int) {
	var l int
	_ = l
	if m.Count != nil {
		n += 1 + sovGenerated(uint64(*m.Count))
	}
	return n
}

func sovGenerated(x uint64) (n int) {
	for {
		n++
@ -1083,6 +1135,7 @@ func (this *CSINodeDriver) String() string {
		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
		`NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`,
		`TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`,
		`Allocatable:` + strings.Replace(fmt.Sprintf("%v", this.Allocatable), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`,
		`}`,
	}, "")
	return s
@ -1226,6 +1279,16 @@ func (this *VolumeError) String() string {
	}, "")
	return s
}
func (this *VolumeNodeResources) String() string {
	if this == nil {
		return "nil"
	}
	s := strings.Join([]string{`&VolumeNodeResources{`,
		`Count:` + valueToStringGenerated(this.Count) + `,`,
		`}`,
	}, "")
	return s
}
func valueToStringGenerated(v interface{}) string {
	rv := reflect.ValueOf(v)
	if rv.IsNil() {
@ -1773,6 +1836,39 @@ func (m *CSINodeDriver) Unmarshal(dAtA []byte) error {
			}
			m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Allocatable == nil {
				m.Allocatable = &VolumeNodeResources{}
			}
			if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
@ -3330,6 +3426,76 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error {
	}
	return nil
}
func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowGenerated
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.Count = &v
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthGenerated
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
func skipGenerated(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
@ -3440,83 +3606,87 @@ func init() {
}

var fileDescriptorGenerated = []byte{
	// 1247 bytes of a gzipped FileDescriptorProto
	// 1311 bytes of a gzipped FileDescriptorProto
	// (opaque gzipped descriptor bytes omitted; regenerating for VolumeNodeResources
	// replaces the old 1247-byte blob with a 1311-byte one)
}

@ -144,6 +144,10 @@ message CSINodeDriver {
  // This can be empty if driver does not support topology.
  // +optional
  repeated string topologyKeys = 3;

  // allocatable represents the volume resources of a node that are available for scheduling.
  // +optional
  optional VolumeNodeResources allocatable = 4;
}

// CSINodeList is a collection of CSINode objects.
@ -330,3 +334,13 @@ message VolumeError {
  optional string message = 2;
}

// VolumeNodeResources is a set of resource limits for scheduling of volumes.
message VolumeNodeResources {
  // Maximum number of unique volumes managed by the CSI driver that can be used on a node.
  // A volume that is both attached and mounted on a node is considered to be used once, not twice.
  // The same rule applies for a unique volume that is shared among multiple pods on the same node.
  // If this field is nil, then the supported number of volumes on this node is unbounded.
  // +optional
  optional int32 count = 1;
}
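
Field 4 on CSINodeDriver and the new message correspond to the Go types shown below; a hedged client-side sketch (hypothetical values, assuming the k8s.io/utils/pointer helper) of a driver entry advertising a 32-volume limit:

	driver := storagev1beta1.CSINodeDriver{
		Name:   "com.example.csi.driver1",
		NodeID: "com.example.csi/csi-node1",
		Allocatable: &storagev1beta1.VolumeNodeResources{
			Count: utilpointer.Int32Ptr(32), // omit (nil) to advertise an unbounded node
		},
	}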
@ -17,7 +17,7 @@ limitations under the License.
package v1beta1

import (
	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@ -357,6 +357,20 @@ type CSINodeDriver struct {
	// This can be empty if driver does not support topology.
	// +optional
	TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"`

	// allocatable represents the volume resources of a node that are available for scheduling.
	// +optional
	Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"`
}

// VolumeNodeResources is a set of resource limits for scheduling of volumes.
type VolumeNodeResources struct {
	// Maximum number of unique volumes managed by the CSI driver that can be used on a node.
	// A volume that is both attached and mounted on a node is considered to be used once, not twice.
	// The same rule applies for a unique volume that is shared among multiple pods on the same node.
	// If this field is nil, then the supported number of volumes on this node is unbounded.
	// +optional
	Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"`
}
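
Because Count is a pointer, consumers must distinguish "no limit recorded" (nil) from an explicit bound. A simplified, hypothetical sketch of the check a scheduler-side predicate performs (the real logic lives in pkg/scheduler and handles attached versus requested volumes in more detail):

	// fitsVolumeLimit reports whether scheduling newVolumes more CSI volumes of
	// this driver onto the node stays within the advertised limit. A nil limit
	// means the node's supported volume count is unbounded.
	func fitsVolumeLimit(limit *int32, inUse, newVolumes int32) bool {
		if limit == nil {
			return true
		}
		return inUse+newVolumes <= *limit
	}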

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -72,6 +72,7 @@ var map_CSINodeDriver = map[string]string{
	"name":         "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.",
	"nodeID":       "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.",
	"topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.",
	"allocatable":  "allocatable represents the volume resources of a node that are available for scheduling.",
}

func (CSINodeDriver) SwaggerDoc() map[string]string {
@ -186,4 +187,13 @@ func (VolumeError) SwaggerDoc() map[string]string {
	return map_VolumeError
}

var map_VolumeNodeResources = map[string]string{
	"":      "VolumeNodeResources is a set of resource limits for scheduling of volumes.",
	"count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.",
}

func (VolumeNodeResources) SwaggerDoc() map[string]string {
	return map_VolumeNodeResources
}

// AUTO-GENERATED FUNCTIONS END HERE
@ -146,6 +146,11 @@ func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) {
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	if in.Allocatable != nil {
		in, out := &in.Allocatable, &out.Allocatable
		*out = new(VolumeNodeResources)
		(*in).DeepCopyInto(*out)
	}
	return
}

@ -461,3 +466,24 @@ func (in *VolumeError) DeepCopy() *VolumeError {
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) {
	*out = *in
	if in.Count != nil {
		in, out := &in.Count, &out.Count
		*out = new(int32)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources.
func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources {
	if in == nil {
		return nil
	}
	out := new(VolumeNodeResources)
	in.DeepCopyInto(out)
	return out
}
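
Since Count is a pointer, the generated copy allocates a fresh int32 instead of aliasing the source. An illustrative snippet (hypothetical; assumes the utilpointer helper used elsewhere in this PR) showing the two objects stay independent:

	orig := &VolumeNodeResources{Count: utilpointer.Int32Ptr(8)}
	dup := orig.DeepCopy()
	*dup.Count = 16
	fmt.Println(*orig.Count) // prints 8: dup owns its own int32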
@ -47,7 +47,10 @@
        "nodeID": "25",
        "topologyKeys": [
          "26"
        ]
        ],
        "allocatable": {
          "count": -1821918122
        }
      }
    ]
  }
Binary file not shown.
@ -33,7 +33,9 @@ metadata:
  uid: ą飋īqJ枊a8衍`Ĩɘ.蘯6ċV夸e
spec:
  drivers:
  - name: "24"
  - allocatable:
      count: -1821918122
    name: "24"
    nodeID: "25"
    topologyKeys:
    - "26"
@ -26,6 +26,7 @@ import (

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -33,7 +34,6 @@ import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -357,12 +357,12 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
			init(testParameters{nodeSelectorKey: nodeSelectorKey, attachLimit: 2})
			defer cleanup()
			nodeName := m.config.ClientNodeName
			attachKey := v1.ResourceName(volumeutil.GetCSIAttachLimitKey(m.provisioner))
			driverName := m.config.GetUniqueDriverName()

			nodeAttachLimit, err := checkNodeForLimits(nodeName, attachKey, m.cs)
			framework.ExpectNoError(err, "while fetching node %v", err)
			csiNodeAttachLimit, err := checkCSINodeForLimits(nodeName, driverName, m.cs)
			framework.ExpectNoError(err, "while checking limits in CSINode: %v", err)

			gomega.Expect(nodeAttachLimit).To(gomega.Equal(2))
			gomega.Expect(csiNodeAttachLimit).To(gomega.BeNumerically("==", 2))

			_, _, pod1 := createPod()
			gomega.Expect(pod1).NotTo(gomega.BeNil(), "while creating first pod")
@ -576,25 +576,21 @@ func waitForMaxVolumeCondition(pod *v1.Pod, cs clientset.Interface) error {
	return waitErr
}

func checkNodeForLimits(nodeName string, attachKey v1.ResourceName, cs clientset.Interface) (int, error) {
	var attachLimit int64
func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Interface) (int32, error) {
	var attachLimit int32

	waitErr := wait.PollImmediate(10*time.Second, csiNodeLimitUpdateTimeout, func() (bool, error) {
		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
		csiNode, err := cs.StorageV1beta1().CSINodes().Get(nodeName, metav1.GetOptions{})
		if err != nil && !errors.IsNotFound(err) {
			return false, err
		}
		limits := getVolumeLimit(node)
		var ok bool
		if len(limits) > 0 {
			attachLimit, ok = limits[attachKey]
			if ok {
				return true, nil
			}
		attachLimit = getVolumeLimitFromCSINode(csiNode, driverName)
		if attachLimit > 0 {
			return true, nil
		}
		return false, nil
	})
	return int(attachLimit), waitErr
	return attachLimit, waitErr
}

func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node framework.NodeSelection, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
@ -805,3 +801,15 @@ func getVolumeHandle(cs clientset.Interface, claim *v1.PersistentVolumeClaim) st
	}
	return pv.Spec.CSI.VolumeHandle
}

func getVolumeLimitFromCSINode(csiNode *storagev1beta1.CSINode, driverName string) int32 {
	for _, d := range csiNode.Spec.Drivers {
		if d.Name != driverName {
			continue
		}
		if d.Allocatable != nil && d.Allocatable.Count != nil {
			return *d.Allocatable.Count
		}
	}
	return 0
}
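
For reference, a hedged one-shot usage sketch of this helper outside the poll loop (node and driver names are illustrative):

	csiNode, err := cs.StorageV1beta1().CSINodes().Get("node-1", metav1.GetOptions{})
	framework.ExpectNoError(err, "while fetching CSINode")
	limit := getVolumeLimitFromCSINode(csiNode, "com.example.csi.driver1")
	// limit == 0 means the driver is not registered or advertises no bound.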
@ -23,7 +23,7 @@ import (
	"time"

	apps "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -131,6 +131,7 @@ func setupScheduler(
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().Services(),
		informerFactory.Storage().V1().StorageClasses(),
		informerFactory.Storage().V1beta1().CSINodes(),
	)

	eventBroadcaster := record.NewBroadcaster()

@ -23,7 +23,7 @@ import (
	"testing"
	"time"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
@ -254,6 +254,7 @@ priorities: []
		informerFactory.Core().V1().Services(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Storage().V1().StorageClasses(),
		informerFactory.Storage().V1beta1().CSINodes(),
		eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
		kubeschedulerconfig.SchedulerAlgorithmSource{
			Policy: &kubeschedulerconfig.SchedulerPolicySource{
@ -325,6 +326,7 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
		informerFactory.Core().V1().Services(),
		informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
		informerFactory.Storage().V1().StorageClasses(),
		informerFactory.Storage().V1beta1().CSINodes(),
		eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: v1.DefaultSchedulerName}),
		kubeschedulerconfig.SchedulerAlgorithmSource{
			Policy: &kubeschedulerconfig.SchedulerPolicySource{
@ -621,6 +623,7 @@ func TestMultiScheduler(t *testing.T) {
		context.informerFactory.Core().V1().PersistentVolumeClaims(),
		context.informerFactory.Core().V1().Services(),
		context.informerFactory.Storage().V1().StorageClasses(),
		context.informerFactory.Storage().V1beta1().CSINodes(),
	)

	go podInformer2.Informer().Run(stopCh)

@ -23,7 +23,7 @@ import (
	"testing"
	"time"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	policy "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/resource"
@ -215,6 +215,7 @@ func initTestSchedulerWithOptions(
		context.informerFactory.Core().V1().PersistentVolumeClaims(),
		context.informerFactory.Core().V1().Services(),
		context.informerFactory.Storage().V1().StorageClasses(),
		context.informerFactory.Storage().V1beta1().CSINodes(),
	)

	// set setPodInformer if provided.

@ -20,7 +20,7 @@ import (
	"net/http"
	"net/http/httptest"

	"k8s.io/api/core/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	clientv1core "k8s.io/client-go/kubernetes/typed/core/v1"
@ -28,6 +28,7 @@ import (
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/scheduler"

	// import DefaultProvider
	_ "k8s.io/kubernetes/pkg/scheduler/algorithmprovider/defaults"
	schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
@ -84,6 +85,7 @@ func StartScheduler(clientSet clientset.Interface) (factory.Configurator, Shutdo
		informerFactory.Core().V1().PersistentVolumeClaims(),
		informerFactory.Core().V1().Services(),
		informerFactory.Storage().V1().StorageClasses(),
		informerFactory.Storage().V1beta1().CSINodes(),
	)

	informerFactory.Start(stopCh)
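
All of these call sites follow one pattern: hand the scheduler a CSINodes informer from the shared factory so its volume-limit logic can read per-driver limits. A minimal wiring sketch with client-go (hypothetical variable names):

	factory := informers.NewSharedInformerFactory(clientSet, 0)
	csiNodes := factory.Storage().V1beta1().CSINodes()
	_ = csiNodes.Lister() // the scheduler reads per-driver Allocatable.Count through this lister
	factory.Start(stopCh)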