Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 23:15:14 +00:00)
Convert scheduler to use CSINode GA
This commit is contained in: commit a59abc3cc3 (parent bf0182b9d3)
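The diff below swaps every CSINode reference in the scheduler from the storage.k8s.io/v1beta1 API group to the GA storage.k8s.io/v1 group: BUILD dependencies, informers, listers, the fake listers used in tests, and the predicate signatures that take a *CSINode. As rough orientation, here is a minimal standalone sketch (not taken from this commit; the client setup, resync period, and program structure are assumptions) of reading CSINode objects through the GA informer and lister that the scheduler now wires in:

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// Assumes in-cluster configuration; adjust for your environment.
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	// GA informer/lister: Storage().V1().CSINodes() replaces Storage().V1beta1().CSINodes().
	csiNodeLister := factory.Storage().V1().CSINodes().Lister()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)

	// List cached CSINode objects and print any per-driver attachable volume limits.
	csiNodes, err := csiNodeLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	for _, n := range csiNodes {
		for _, d := range n.Spec.Drivers {
			if d.Allocatable != nil && d.Allocatable.Count != nil {
				fmt.Printf("%s: driver %s allows %d volumes\n", n.Name, d.Name, *d.Allocatable.Count)
			}
		}
	}
}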
@@ -48,13 +48,11 @@ go_library(
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/informers/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/informers/storage/v1:go_default_library",
-       "//staging/src/k8s.io/client-go/informers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
-       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/events:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
@@ -28,7 +28,6 @@ go_library(
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
-       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -37,7 +36,6 @@ go_library(
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
-       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
        "//staging/src/k8s.io/csi-translation-lib:go_default_library",
@@ -67,7 +65,6 @@ go_test(
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
-       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -20,11 +20,10 @@ import (
    "fmt"

    v1 "k8s.io/api/core/v1"
-   storagev1beta1 "k8s.io/api/storage/v1beta1"
+   storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/util/rand"
    corelisters "k8s.io/client-go/listers/core/v1"
    storagelisters "k8s.io/client-go/listers/storage/v1"
-   v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
    csitrans "k8s.io/csi-translation-lib"
    "k8s.io/klog"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@@ -44,7 +43,7 @@ type InTreeToCSITranslator interface {

 // CSIMaxVolumeLimitChecker defines predicate needed for counting CSI volumes
 type CSIMaxVolumeLimitChecker struct {
-   csiNodeLister v1beta1storagelisters.CSINodeLister
+   csiNodeLister storagelisters.CSINodeLister
    pvLister corelisters.PersistentVolumeLister
    pvcLister corelisters.PersistentVolumeClaimLister
    scLister storagelisters.StorageClassLister
@@ -56,7 +55,7 @@ type CSIMaxVolumeLimitChecker struct {

 // NewCSIMaxVolumeLimitPredicate returns a predicate for counting CSI volumes
 func NewCSIMaxVolumeLimitPredicate(
-   csiNodeLister v1beta1storagelisters.CSINodeLister, pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister, scLister storagelisters.StorageClassLister) FitPredicate {
+   csiNodeLister storagelisters.CSINodeLister, pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister, scLister storagelisters.StorageClassLister) FitPredicate {
    c := &CSIMaxVolumeLimitChecker{
        csiNodeLister: csiNodeLister,
        pvLister: pvLister,
@@ -68,7 +67,7 @@ func NewCSIMaxVolumeLimitPredicate(
    return c.attachableLimitPredicate
 }

-func getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1beta1.CSINode) map[v1.ResourceName]int64 {
+func getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
    // TODO: stop getting values from Node object in v1.18
    nodeVolumeLimits := nodeInfo.VolumeLimits()
    if csiNode != nil {
@@ -154,7 +153,7 @@ func (c *CSIMaxVolumeLimitChecker) attachableLimitPredicate(
 }

 func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
-   csiNode *storagev1beta1.CSINode, volumes []v1.Volume, namespace string, result map[string]string) error {
+   csiNode *storagev1.CSINode, volumes []v1.Volume, namespace string, result map[string]string) error {
    for _, vol := range volumes {
        // CSI volumes can only be used as persistent volumes
        if vol.PersistentVolumeClaim == nil {
@@ -189,7 +188,7 @@ func (c *CSIMaxVolumeLimitChecker) filterAttachableVolumes(
 // getCSIDriverInfo returns the CSI driver name and volume ID of a given PVC.
 // If the PVC is from a migrated in-tree plugin, this function will return
 // the information of the CSI driver that the plugin has been migrated to.
-func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
+func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
    pvName := pvc.Spec.VolumeName
    namespace := pvc.Namespace
    pvcName := pvc.Name
@@ -244,7 +243,7 @@ func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfo(csiNode *storagev1beta1.CSIN
 }

 // getCSIDriverInfoFromSC returns the CSI driver name and a random volume ID of a given PVC's StorageClass.
-func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1beta1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
+func (c *CSIMaxVolumeLimitChecker) getCSIDriverInfoFromSC(csiNode *storagev1.CSINode, pvc *v1.PersistentVolumeClaim) (string, string) {
    namespace := pvc.Namespace
    pvcName := pvc.Name
    scName := v1helper.GetPersistentVolumeClaimClass(pvc)
@@ -23,7 +23,7 @@ import (
    "testing"

    v1 "k8s.io/api/core/v1"
-   storagev1beta1 "k8s.io/api/storage/v1beta1"
+   storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -546,7 +546,7 @@ func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakel
    return pvcLister
 }

-func enableMigrationOnNode(csiNode *storagev1beta1.CSINode, pluginName string) {
+func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
    nodeInfoAnnotations := csiNode.GetAnnotations()
    if nodeInfoAnnotations == nil {
        nodeInfoAnnotations = map[string]string{}
@@ -569,7 +569,7 @@ func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.St
    }
 }

-func getFakeCSINodeLister(csiNode *storagev1beta1.CSINode) fakelisters.CSINodeLister {
+func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister {
    if csiNode != nil {
        return fakelisters.CSINodeLister(*csiNode)
    }
@@ -23,7 +23,7 @@ import (
    "testing"

    v1 "k8s.io/api/core/v1"
-   "k8s.io/api/storage/v1beta1"
+   storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    csilibplugins "k8s.io/csi-translation-lib/plugins"
@@ -1075,7 +1075,7 @@ func TestMaxVolumeFuncM4WithBothBetaAndStableLabels(t *testing.T) {
    }
 }

-func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *v1beta1.CSINode) {
+func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *storagev1.CSINode) {
    nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
    node := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
@@ -1083,7 +1083,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
            Allocatable: v1.ResourceList{},
        },
    }
-   var csiNode *v1beta1.CSINode
+   var csiNode *storagev1.CSINode

    addLimitToNode := func() {
        for _, driver := range driverNames {
@@ -1092,10 +1092,10 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
    }

    initCSINode := func() {
-       csiNode = &v1beta1.CSINode{
+       csiNode = &storagev1.CSINode{
            ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
-           Spec: v1beta1.CSINodeSpec{
-               Drivers: []v1beta1.CSINodeDriver{},
+           Spec: storagev1.CSINodeSpec{
+               Drivers: []storagev1.CSINodeDriver{},
            },
        }
    }
@@ -1103,12 +1103,12 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
    addDriversCSINode := func(addLimits bool) {
        initCSINode()
        for _, driver := range driverNames {
-           driver := v1beta1.CSINodeDriver{
+           driver := storagev1.CSINodeDriver{
                Name: driver,
                NodeID: "node-for-max-pd-test-1",
            }
            if addLimits {
-               driver.Allocatable = &v1beta1.VolumeNodeResources{
+               driver.Allocatable = &storagev1.VolumeNodeResources{
                    Count: utilpointer.Int32Ptr(int32(limit)),
                }
            }
@@ -27,7 +27,6 @@ import (

    v1 "k8s.io/api/core/v1"
    storage "k8s.io/api/storage/v1"
-   v1beta1storage "k8s.io/api/storage/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
@@ -36,7 +35,6 @@ import (
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    corelisters "k8s.io/client-go/listers/core/v1"
    storagelisters "k8s.io/client-go/listers/storage/v1"
-   v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
    volumehelpers "k8s.io/cloud-provider/volume/helpers"
    csilibplugins "k8s.io/csi-translation-lib/plugins"
    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
@@ -225,7 +223,7 @@ type MaxPDVolumeCountChecker struct {
    filter VolumeFilter
    volumeLimitKey v1.ResourceName
    maxVolumeFunc func(node *v1.Node) int
-   csiNodeLister v1beta1storagelisters.CSINodeLister
+   csiNodeLister storagelisters.CSINodeLister
    pvLister corelisters.PersistentVolumeLister
    pvcLister corelisters.PersistentVolumeClaimLister
    scLister storagelisters.StorageClassLister
@@ -244,7 +242,7 @@ type VolumeFilter struct {
    // MatchProvisioner evaluates if the StorageClass provisioner matches the running predicate
    MatchProvisioner func(sc *storage.StorageClass) (relevant bool)
    // IsMigrated returns a boolean specifying whether the plugin is migrated to a CSI driver
-   IsMigrated func(csiNode *v1beta1storage.CSINode) bool
+   IsMigrated func(csiNode *storage.CSINode) bool
 }

 // NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
@@ -257,7 +255,7 @@ type VolumeFilter struct {
 // The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume
 // types, counts the number of unique volumes, and rejects the new pod if it would place the total count over
 // the maximum.
-func NewMaxPDVolumeCountPredicate(filterName string, csiNodeLister v1beta1storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister,
+func NewMaxPDVolumeCountPredicate(filterName string, csiNodeLister storagelisters.CSINodeLister, scLister storagelisters.StorageClassLister,
    pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister) FitPredicate {
    var filter VolumeFilter
    var volumeLimitKey v1.ResourceName
@@ -441,7 +439,7 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo
    }

    var (
-       csiNode *v1beta1storage.CSINode
+       csiNode *storage.CSINode
        err error
    )
    if c.csiNodeLister != nil {
@@ -518,7 +516,7 @@ var EBSVolumeFilter = VolumeFilter{
        return false
    },

-   IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+   IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.AWSEBSInTreePluginName)
    },
 }
@@ -546,7 +544,7 @@ var GCEPDVolumeFilter = VolumeFilter{
        return false
    },

-   IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+   IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.GCEPDInTreePluginName)
    },
 }
@@ -574,7 +572,7 @@ var AzureDiskVolumeFilter = VolumeFilter{
        return false
    },

-   IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+   IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.AzureDiskInTreePluginName)
    },
 }
@@ -603,7 +601,7 @@ var CinderVolumeFilter = VolumeFilter{
        return false
    },

-   IsMigrated: func(csiNode *v1beta1storage.CSINode) bool {
+   IsMigrated: func(csiNode *storage.CSINode) bool {
        return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName)
    },
 }
@@ -19,8 +19,8 @@ package predicates
 import (
    "strings"

-   "k8s.io/api/core/v1"
-   storagev1beta1 "k8s.io/api/storage/v1beta1"
+   v1 "k8s.io/api/core/v1"
+   storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/sets"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -97,7 +97,7 @@ func SetPredicatesOrderingDuringTest(value []string) func() {

 // isCSIMigrationOn returns a boolean value indicating whether
 // the CSI migration has been enabled for a particular storage plugin.
-func isCSIMigrationOn(csiNode *storagev1beta1.CSINode, pluginName string) bool {
+func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
    if csiNode == nil || len(pluginName) == 0 {
        return false
    }
@@ -29,7 +29,6 @@ import (
    corelisters "k8s.io/client-go/listers/core/v1"
    policylisters "k8s.io/client-go/listers/policy/v1beta1"
    storagelisters "k8s.io/client-go/listers/storage/v1"
-   v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
    "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
    schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -53,7 +52,7 @@ type PluginFactoryArgs struct {
    ReplicaSetLister appslisters.ReplicaSetLister
    StatefulSetLister appslisters.StatefulSetLister
    PDBLister policylisters.PodDisruptionBudgetLister
-   CSINodeLister v1beta1storagelisters.CSINodeLister
+   CSINodeLister storagelisters.CSINodeLister
    PVLister corelisters.PersistentVolumeLister
    PVCLister corelisters.PersistentVolumeClaimLister
    StorageClassLister storagelisters.StorageClassLister
@@ -34,13 +34,11 @@ import (
    coreinformers "k8s.io/client-go/informers/core/v1"
    policyinformers "k8s.io/client-go/informers/policy/v1beta1"
    storageinformersv1 "k8s.io/client-go/informers/storage/v1"
-   storageinformersv1beta1 "k8s.io/client-go/informers/storage/v1beta1"
    clientset "k8s.io/client-go/kubernetes"
    appslisters "k8s.io/client-go/listers/apps/v1"
    corelisters "k8s.io/client-go/listers/core/v1"
    policylisters "k8s.io/client-go/listers/policy/v1beta1"
    storagelistersv1 "k8s.io/client-go/listers/storage/v1"
-   storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/events"
    "k8s.io/klog"
@@ -134,7 +132,7 @@ type Configurator struct {
    // a means to list all StorageClasses
    storageClassLister storagelistersv1.StorageClassLister
    // a means to list all CSINodes
-   csiNodeLister storagelistersv1beta1.CSINodeLister
+   csiNodeLister storagelistersv1.CSINodeLister
    // a means to list all Nodes
    nodeLister corelisters.NodeLister
    // a means to list all Pods
@@ -195,7 +193,7 @@ type ConfigFactoryArgs struct {
    ServiceInformer coreinformers.ServiceInformer
    PdbInformer policyinformers.PodDisruptionBudgetInformer
    StorageClassInformer storageinformersv1.StorageClassInformer
-   CSINodeInformer storageinformersv1beta1.CSINodeInformer
+   CSINodeInformer storageinformersv1.CSINodeInformer
    VolumeBinder *volumebinder.VolumeBinder
    SchedulerCache internalcache.Cache
    HardPodAffinitySymmetricWeight int32
@@ -225,7 +223,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) *Configurator {
        storageClassLister = args.StorageClassInformer.Lister()
    }

-   var csiNodeLister storagelistersv1beta1.CSINodeLister
+   var csiNodeLister storagelistersv1.CSINodeLister
    if args.CSINodeInformer != nil {
        csiNodeLister = args.CSINodeInformer.Lister()
    }
@@ -552,7 +552,7 @@ func newConfigFactoryWithFrameworkRegistry(
        ServiceInformer: informerFactory.Core().V1().Services(),
        PdbInformer: informerFactory.Policy().V1beta1().PodDisruptionBudgets(),
        StorageClassInformer: informerFactory.Storage().V1().StorageClasses(),
-       CSINodeInformer: informerFactory.Storage().V1beta1().CSINodes(),
+       CSINodeInformer: informerFactory.Storage().V1().CSINodes(),
        HardPodAffinitySymmetricWeight: hardPodAffinitySymmetricWeight,
        DisablePreemption: disablePodPreemption,
        PercentageOfNodesToScore: schedulerapi.DefaultPercentageOfNodesToScore,
@@ -22,7 +22,7 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
-       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
+       "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
    ],
 )

@@ -44,7 +44,7 @@ go_test(
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
-       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
+       "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
@@ -24,8 +24,7 @@ import (
    "testing"

    v1 "k8s.io/api/core/v1"
-   "k8s.io/api/storage/v1beta1"
-   storagev1beta1 "k8s.io/api/storage/v1beta1"
+   storagev1 "k8s.io/api/storage/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/sets"
@@ -545,7 +544,7 @@ func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakel
    return pvcLister
 }

-func enableMigrationOnNode(csiNode *storagev1beta1.CSINode, pluginName string) {
+func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
    nodeInfoAnnotations := csiNode.GetAnnotations()
    if nodeInfoAnnotations == nil {
        nodeInfoAnnotations = map[string]string{}
@@ -568,14 +567,14 @@ func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.St
    }
 }

-func getFakeCSINodeLister(csiNode *storagev1beta1.CSINode) fakelisters.CSINodeLister {
+func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister {
    if csiNode != nil {
        return fakelisters.CSINodeLister(*csiNode)
    }
    return fakelisters.CSINodeLister{}
 }

-func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *v1beta1.CSINode) {
+func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *storagev1.CSINode) {
    nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
    node := &v1.Node{
        ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
@@ -583,7 +582,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
            Allocatable: v1.ResourceList{},
        },
    }
-   var csiNode *v1beta1.CSINode
+   var csiNode *storagev1.CSINode

    addLimitToNode := func() {
        for _, driver := range driverNames {
@@ -592,10 +591,10 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
    }

    initCSINode := func() {
-       csiNode = &v1beta1.CSINode{
+       csiNode = &storagev1.CSINode{
            ObjectMeta: metav1.ObjectMeta{Name: "csi-node-for-max-pd-test-1"},
-           Spec: v1beta1.CSINodeSpec{
-               Drivers: []v1beta1.CSINodeDriver{},
+           Spec: storagev1.CSINodeSpec{
+               Drivers: []storagev1.CSINodeDriver{},
            },
        }
    }
@@ -603,12 +602,12 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
    addDriversCSINode := func(addLimits bool) {
        initCSINode()
        for _, driver := range driverNames {
-           driver := v1beta1.CSINodeDriver{
+           driver := storagev1.CSINodeDriver{
                Name: driver,
                NodeID: "node-for-max-pd-test-1",
            }
            if addLimits {
-               driver.Allocatable = &v1beta1.VolumeNodeResources{
+               driver.Allocatable = &storagev1.VolumeNodeResources{
                    Count: utilpointer.Int32Ptr(int32(limit)),
                }
            }
@@ -19,14 +19,14 @@ package nodevolumelimits
 import (
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/informers"
-   v1beta1 "k8s.io/client-go/listers/storage/v1beta1"
+   storagelisters "k8s.io/client-go/listers/storage/v1"
    kubefeatures "k8s.io/kubernetes/pkg/features"
 )

 // getCSINodeListerIfEnabled returns the CSINode lister or nil if the feature is disabled
-func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) v1beta1.CSINodeLister {
+func getCSINodeListerIfEnabled(factory informers.SharedInformerFactory) storagelisters.CSINodeLister {
    if !utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) {
        return nil
    }
-   return factory.Storage().V1beta1().CSINodes().Lister()
+   return factory.Storage().V1().CSINodes().Lister()
 }
@@ -11,13 +11,11 @@ go_library(
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
-       "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
-       "//staging/src/k8s.io/client-go/listers/storage/v1beta1:go_default_library",
    ],
 )

@@ -22,13 +22,11 @@ import (
    appsv1 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
-   storagev1beta1 "k8s.io/api/storage/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    appslisters "k8s.io/client-go/listers/apps/v1"
    corelisters "k8s.io/client-go/listers/core/v1"
    storagelisters "k8s.io/client-go/listers/storage/v1"
-   v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -280,19 +278,19 @@ func NewNodeInfoLister(nodes []*v1.Node) schedulerlisters.NodeInfoLister {
    return NodeInfoLister(nodeInfoList)
 }

-var _ v1beta1storagelisters.CSINodeLister = CSINodeLister{}
+var _ storagelisters.CSINodeLister = CSINodeLister{}

-// CSINodeLister declares a storagev1beta1.CSINode type for testing.
-type CSINodeLister storagev1beta1.CSINode
+// CSINodeLister declares a storagev1.CSINode type for testing.
+type CSINodeLister storagev1.CSINode

 // Get returns a fake CSINode object.
-func (n CSINodeLister) Get(name string) (*storagev1beta1.CSINode, error) {
-   csiNode := storagev1beta1.CSINode(n)
+func (n CSINodeLister) Get(name string) (*storagev1.CSINode, error) {
+   csiNode := storagev1.CSINode(n)
    return &csiNode, nil
 }

 // List lists all CSINodes in the indexer.
-func (n CSINodeLister) List(selector labels.Selector) (ret []*storagev1beta1.CSINode, err error) {
+func (n CSINodeLister) List(selector labels.Selector) (ret []*storagev1.CSINode, err error) {
    return nil, fmt.Errorf("not implemented")
 }

@@ -33,7 +33,7 @@ import (
    "k8s.io/client-go/informers"
    coreinformers "k8s.io/client-go/informers/core/v1"
    policyv1beta1informers "k8s.io/client-go/informers/policy/v1beta1"
-   storagev1beta1informers "k8s.io/client-go/informers/storage/v1beta1"
+   storageinformers "k8s.io/client-go/informers/storage/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/events"
@@ -288,9 +288,9 @@ func New(client clientset.Interface,
        pdbInformer = informerFactory.Policy().V1beta1().PodDisruptionBudgets()
    }

-   var csiNodeInformer storagev1beta1informers.CSINodeInformer
+   var csiNodeInformer storageinformers.CSINodeInformer
    if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CSINodeInfo) {
-       csiNodeInformer = informerFactory.Storage().V1beta1().CSINodes()
+       csiNodeInformer = informerFactory.Storage().V1().CSINodes()
    }

    // Set up the configurator which can create schedulers from configs.
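For context on what the predicates read from the GA object: a storagev1.CSINode lists the CSI drivers registered on a node, and a driver's optional Allocatable.Count is the per-node attachable volume limit that the volume-count checkers above consume. A hedged sketch of that lookup (the package name, helper name, and its use are illustrative and not part of this commit):

package example

import (
	storagev1 "k8s.io/api/storage/v1"
)

// countLimitFor returns the attachable-volume limit reported by a CSI driver
// on a CSINode, or false when the driver is absent or reports no limit.
// It reads the same Spec.Drivers and Allocatable.Count fields used by the
// predicate code in the diff.
func countLimitFor(csiNode *storagev1.CSINode, driverName string) (int32, bool) {
	if csiNode == nil {
		return 0, false
	}
	for _, d := range csiNode.Spec.Drivers {
		if d.Name == driverName && d.Allocatable != nil && d.Allocatable.Count != nil {
			return *d.Allocatable.Count, true
		}
	}
	return 0, false
}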