Merge pull request #127612 from carlory/make-scheduler-test-independent

make each scheduler test independent
Authored by Kubernetes Prow Robot on 2025-03-13 00:47:47 -07:00; committed by GitHub
commit be32ca61a6
9 changed files with 28 additions and 14 deletions
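
Every file in this diff applies the same pattern: the metrics.Register() call is removed from individual test functions and replaced with a single package-level init(), so scheduler metrics are registered before any test in the package runs, regardless of which test executes first or whether a test is run in isolation. A minimal sketch of the resulting test-file layout (package and test names here are illustrative, not taken from the diff; it assumes metrics.Register() from k8s.io/kubernetes/pkg/scheduler/metrics is safe to call during package initialization, as it is only invoked once per test binary this way):

package cache // illustrative test package name

import (
	"testing"

	"k8s.io/kubernetes/pkg/scheduler/metrics"
)

// Registering at package init time makes every test self-contained:
// no test depends on an earlier test having called metrics.Register().
func init() {
	metrics.Register()
}

// Illustrative test; the real tests in this diff follow the same shape.
func TestRecordsSchedulerMetrics(t *testing.T) {
	// The test body can assume scheduler metrics are already registered.
}

With this in place, running a single test via go test -run still gets registered metrics, which is what "make each scheduler test independent" refers to.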

View File

@@ -46,6 +46,10 @@ var nodeInfoCmpOpts = []cmp.Option{
 	cmpopts.IgnoreFields(framework.PodInfo{}, "cachedResource"),
 }
 
+func init() {
+	metrics.Register()
+}
+
 func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *framework.NodeInfo) error {
 	if (actual == nil) != (expected == nil) {
 		return errors.New("one of the actual or expected is nil and the other is not")
@@ -273,7 +277,6 @@ func assumeAndFinishBinding(logger klog.Logger, cache *cacheImpl, pod *v1.Pod, a
 // TestExpirePod tests that assumed pods will be removed if expired.
 // The removal will be reflected in node info.
 func TestExpirePod(t *testing.T) {
-	metrics.Register()
 	nodeName := "node"
 	testPods := []*v1.Pod{
 		makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostIP: "127.0.0.1", HostPort: 80, Protocol: "TCP"}}),

View File

@@ -29,7 +29,6 @@ import (
 func TestClose(t *testing.T) {
 	logger, ctx := ktesting.NewTestContext(t)
-	metrics.Register()
 	rr := metrics.NewMetricsAsyncRecorder(10, time.Second, ctx.Done())
 	aq := newActiveQueue(heap.NewWithRecorder(podInfoKeyFunc, heap.LessFunc[*framework.QueuedPodInfo](newDefaultQueueSort()), metrics.NewActivePodsRecorder()), true, *rr)

View File

@@ -60,7 +60,6 @@ import (
 )
 
 func TestEventHandlers_MoveToActiveOnNominatedNodeUpdate(t *testing.T) {
-	metrics.Register()
 	highPriorityPod :=
 		st.MakePod().Name("hpp").Namespace("ns1").UID("hppns1").Priority(highPriority).SchedulerName(testSchedulerName).Obj()
@@ -210,7 +209,6 @@ func newDefaultQueueSort() framework.LessFunc {
 func TestUpdatePodInCache(t *testing.T) {
 	ttl := 10 * time.Second
 	nodeName := "node"
-	metrics.Register()
 	tests := []struct {
 		name string

View File

@@ -98,6 +98,10 @@ var (
 	epochTime6 = metav1.NewTime(time.Unix(0, 6))
 )
 
+func init() {
+	metrics.Register()
+}
+
 func getDefaultDefaultPreemptionArgs() *config.DefaultPreemptionArgs {
 	v1dpa := &kubeschedulerconfigv1.DefaultPreemptionArgs{}
 	configv1.SetDefaults_DefaultPreemptionArgs(v1dpa)
@@ -155,7 +159,6 @@ const (
 )
 
 func TestPostFilter(t *testing.T) {
-	metrics.Register()
 	onePodRes := map[v1.ResourceName]string{v1.ResourcePods: "1"}
 	nodeRes := map[v1.ResourceName]string{v1.ResourceCPU: "200m", v1.ResourceMemory: "400"}
 	tests := []struct {
@@ -471,7 +474,6 @@ type candidate struct {
 }
 
 func TestDryRunPreemption(t *testing.T) {
-	metrics.Register()
 	tests := []struct {
 		name string
 		args *config.DefaultPreemptionArgs

View File

@@ -43,6 +43,10 @@ var (
 	}
 )
 
+func init() {
+	metrics.Register()
+}
+
 func createPodWithAffinityTerms(namespace, nodeName string, labels map[string]string, affinity, antiAffinity []v1.PodAffinityTerm) *v1.Pod {
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -73,7 +77,6 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
 	}
 	podLabel2 := map[string]string{"security": "S1"}
 	node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node1", Labels: labels1}}
-	metrics.Register()
 	tests := []struct {
 		pod *v1.Pod

View File

@@ -63,6 +63,10 @@ var (
 	taints = []v1.Taint{{Key: v1.TaintNodeUnschedulable, Value: "", Effect: v1.TaintEffectNoSchedule}}
 )
 
+func init() {
+	metrics.Register()
+}
+
 func (p *criticalPaths) sort() {
 	if p[0].MatchNum == p[1].MatchNum && p[0].TopologyValue > p[1].TopologyValue {
 		// Swap TopologyValue to make them sorted alphabetically.
@@ -71,7 +75,6 @@ func (p *criticalPaths) sort() {
 }
 
 func TestPreFilterState(t *testing.T) {
-	metrics.Register()
 	tests := []struct {
 		name string
 		pod *v1.Pod
@@ -2316,7 +2319,6 @@ func TestPreFilterStateRemovePod(t *testing.T) {
 }
 
 func BenchmarkFilter(b *testing.B) {
-	metrics.Register()
 	tests := []struct {
 		name string
 		pod *v1.Pod

View File

@@ -63,6 +63,10 @@ var (
 	}
 )
 
+func init() {
+	metrics.Register()
+}
+
 type FakePostFilterPlugin struct {
 	numViolatingVictim int
 }
@@ -140,7 +144,6 @@ func (pl *FakePreemptionScorePostFilterPlugin) OrderedScoreFuncs(ctx context.Con
 }
 
 func TestDryRunPreemption(t *testing.T) {
-	metrics.Register()
 	tests := []struct {
 		name string
 		nodes []*v1.Node

View File

@@ -66,6 +66,10 @@ const (
 	injectFilterReason = "injected filter status"
 )
 
+func init() {
+	metrics.Register()
+}
+
 // TestScoreWithNormalizePlugin implements ScoreWithNormalizePlugin interface.
 // TestScorePlugin only implements ScorePlugin interface.
 var _ framework.ScorePlugin = &TestScoreWithNormalizePlugin{}
@@ -460,7 +464,6 @@ func newFrameworkWithQueueSortAndBind(ctx context.Context, r Registry, profile c
 }
 
 func TestInitFrameworkWithScorePlugins(t *testing.T) {
-	metrics.Register()
 	tests := []struct {
 		name string
 		plugins *config.Plugins
@@ -2905,7 +2908,6 @@ func withMetricsRecorder(recorder *metrics.MetricAsyncRecorder) Option {
 func TestRecordingMetrics(t *testing.T) {
 	state := &framework.CycleState{}
 	state.SetRecordPluginMetrics(true)
-	metrics.Register()
 	tests := []struct {
 		name string
 		action func(ctx context.Context, f framework.Framework)
@@ -3089,7 +3091,6 @@ func TestRecordingMetrics(t *testing.T) {
 }
 
 func TestRunBindPlugins(t *testing.T) {
-	metrics.Register()
 	tests := []struct {
 		name string
 		injects []framework.Code
@@ -3206,7 +3207,6 @@ func TestRunBindPlugins(t *testing.T) {
 }
 
 func TestPermitWaitDurationMetric(t *testing.T) {
-	metrics.Register()
 	tests := []struct {
 		name string
 		inject injectedResult

View File

@@ -64,6 +64,10 @@ import (
 	utiltesting "k8s.io/kubernetes/test/utils/ktesting"
 )
 
+func init() {
+	metrics.Register()
+}
+
 func TestSchedulerCreation(t *testing.T) {
 	invalidRegistry := map[string]frameworkruntime.PluginFactory{
 		defaultbinder.Name: defaultbinder.New,