Mirror of https://github.com/k3s-io/kubernetes.git
feat: remove FakePDBLister
parent e1f86e3460
commit ce33fcc311
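The diff below replaces the scheduler test helpers' slice-backed fakes with listers built from a fake clientset through a shared informer factory, and deletes the now-unused FakePDBLister type. A minimal sketch of the resulting pattern (the package and function names here are illustrative, not taken from the diff):

package example

import (
    "k8s.io/client-go/informers"
    clientsetfake "k8s.io/client-go/kubernetes/fake"
    policylisters "k8s.io/client-go/listers/policy/v1beta1"
)

// newPDBLister builds a PodDisruptionBudget lister the way the updated
// tests do: from a fake clientset via a shared informer factory, instead
// of the removed slice-backed FakePDBLister.
func newPDBLister() policylisters.PodDisruptionBudgetLister {
    client := clientsetfake.NewSimpleClientset()
    informerFactory := informers.NewSharedInformerFactory(client, 0)
    return informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister()
}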
@@ -29,6 +29,8 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
+    "k8s.io/client-go/informers"
+    clientsetfake "k8s.io/client-go/kubernetes/fake"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
@@ -39,7 +41,6 @@ import (
     internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
     "k8s.io/kubernetes/pkg/scheduler/util"
 )

@@ -533,6 +534,9 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {

     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
+            client := clientsetfake.NewSimpleClientset()
+            informerFactory := informers.NewSharedInformerFactory(client, 0)
+
             extenders := []algorithm.SchedulerExtender{}
             for ii := range test.extenders {
                 extenders = append(extenders, &test.extenders[ii])
@@ -552,8 +556,8 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
                 emptyFramework,
                 extenders,
                 nil,
-                schedulertesting.FakePersistentVolumeClaimLister{},
-                schedulertesting.FakePDBLister{},
+                informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
+                informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
                 false,
                 false,
                 schedulerapi.DefaultPercentageOfNodesToScore,
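One behavioral note (an observation about client-go listers, not something stated in the diff): because the informer factory in these tests is never started and no PodDisruptionBudgets are added to the fake clientset, the informer-backed lister lists an empty cache, which matches what the zero-value FakePDBLister{} used to return. A minimal sketch, reusing the imports from the sketch above plus k8s.io/apimachinery/pkg/labels:

// The factory is never started, so the lister's indexer stays empty:
// List returns no PodDisruptionBudgets and a nil error, just as the
// old empty FakePDBLister{} did.
lister := informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister()
pdbs, err := lister.List(labels.Everything())
// pdbs is empty, err is nil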
@@ -642,6 +642,9 @@ func TestGenericScheduler(t *testing.T) {
     }
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
+            client := clientsetfake.NewSimpleClientset()
+            informerFactory := informers.NewSharedInformerFactory(client, 0)
+
             filterPlugin.failedNodeReturnCodeMap = test.filterFailedNodeReturnCodeMap

             cache := internalcache.New(time.Duration(0), wait.NeverStop)
@@ -671,7 +674,7 @@ func TestGenericScheduler(t *testing.T) {
                 []algorithm.SchedulerExtender{},
                 nil,
                 pvcLister,
-                schedulertesting.FakePDBLister{},
+                informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
                 test.alwaysCheckAllPredicates,
                 false,
                 schedulerapi.DefaultPercentageOfNodesToScore,
@@ -1377,6 +1380,9 @@ func TestSelectNodesForPreemption(t *testing.T) {
     labelKeys := []string{"hostname", "zone", "region"}
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
+            client := clientsetfake.NewSimpleClientset()
+            informerFactory := informers.NewSharedInformerFactory(client, 0)
+
             filterFailedNodeReturnCodeMap := map[string]framework.Code{}
             cache := internalcache.New(time.Duration(0), wait.NeverStop)
             for _, pod := range test.pods {
@@ -1399,7 +1405,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
                 []algorithm.SchedulerExtender{},
                 nil,
                 nil,
-                schedulertesting.FakePDBLister{},
+                informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
                 false,
                 false,
                 schedulerapi.DefaultPercentageOfNodesToScore,
@@ -2085,6 +2091,9 @@ func TestPreempt(t *testing.T) {
     labelKeys := []string{"hostname", "zone", "region"}
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
+            client := clientsetfake.NewSimpleClientset()
+            informerFactory := informers.NewSharedInformerFactory(client, 0)
+
             t.Logf("===== Running test %v", t.Name())
             stop := make(chan struct{})
             cache := internalcache.New(time.Duration(0), stop)
@@ -2137,8 +2146,8 @@ func TestPreempt(t *testing.T) {
                 emptyFramework,
                 extenders,
                 nil,
-                schedulertesting.FakePersistentVolumeClaimLister{},
-                schedulertesting.FakePDBLister{},
+                informerFactory.Core().V1().PersistentVolumeClaims().Lister(),
+                informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister(),
                 false,
                 false,
                 schedulerapi.DefaultPercentageOfNodesToScore,
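The hunks above only swap which lister is passed in; a test that actually needs PodDisruptionBudgets to be visible would also have to seed the fake clientset and start the informers. A sketch of that standard client-go pattern (the helper name and the PDB object are illustrative, not part of this commit):

package example

import (
    policy "k8s.io/api/policy/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/informers"
    clientsetfake "k8s.io/client-go/kubernetes/fake"
    policylisters "k8s.io/client-go/listers/policy/v1beta1"
)

// pdbListerWithObjects returns a lister that can actually see a
// PodDisruptionBudget: the object is seeded into the fake clientset,
// and the informers are started and synced before the lister is used.
func pdbListerWithObjects(stopCh <-chan struct{}) policylisters.PodDisruptionBudgetLister {
    pdb := &policy.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "pdb-1"},
    }
    client := clientsetfake.NewSimpleClientset(pdb)
    informerFactory := informers.NewSharedInformerFactory(client, 0)
    lister := informerFactory.Policy().V1beta1().PodDisruptionBudgets().Lister()

    informerFactory.Start(stopCh)
    informerFactory.WaitForCacheSync(stopCh)
    return lister
}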
@@ -14,12 +14,10 @@ go_library(
         "//pkg/scheduler/algorithm:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
-        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
     ],
 )

@@ -21,12 +21,10 @@ import (

     appsv1 "k8s.io/api/apps/v1"
     v1 "k8s.io/api/core/v1"
-    policy "k8s.io/api/policy/v1beta1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     appslisters "k8s.io/client-go/listers/apps/v1"
     corelisters "k8s.io/client-go/listers/core/v1"
-    policylisters "k8s.io/client-go/listers/policy/v1beta1"
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
 )

@@ -236,21 +234,3 @@ func (f *fakePersistentVolumeClaimNamespaceLister) Get(name string) (*v1.Persist
 func (f fakePersistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
     return nil, fmt.Errorf("not implemented")
 }
-
-// FakePDBLister implements PDBLister on a slice of PodDisruptionBudgets for test purposes.
-type FakePDBLister []*policy.PodDisruptionBudget
-
-// List returns a list of PodDisruptionBudgets.
-func (f FakePDBLister) List(labels.Selector) ([]*policy.PodDisruptionBudget, error) {
-    return f, nil
-}
-
-// PodDisruptionBudgets returns nil.
-func (f FakePDBLister) PodDisruptionBudgets(namespace string) policylisters.PodDisruptionBudgetNamespaceLister {
-    return nil
-}
-
-// GetPodPodDisruptionBudgets returns nil.
-func (f FakePDBLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error) {
-    return nil, nil
-}
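For reference, the removed FakePDBLister implemented exactly the three methods of client-go's policy/v1beta1 PodDisruptionBudget lister contract (List, namespace-scoped listing, and the GetPodPodDisruptionBudgets expansion), which is why the informer-backed lister drops in at every call site above. A paraphrased restatement of that contract under a local name (not part of this diff):

package example

import (
    v1 "k8s.io/api/core/v1"
    policy "k8s.io/api/policy/v1beta1"
    "k8s.io/apimachinery/pkg/labels"
    policylisters "k8s.io/client-go/listers/policy/v1beta1"
)

// pdbLister restates the methods a PodDisruptionBudget lister is expected
// to provide; both the removed FakePDBLister and the informer-backed
// lister used in the tests above provide them.
type pdbLister interface {
    List(selector labels.Selector) ([]*policy.PodDisruptionBudget, error)
    PodDisruptionBudgets(namespace string) policylisters.PodDisruptionBudgetNamespaceLister
    GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error)
}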