mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-13 22:05:59 +00:00
fix the integration test
This commit is contained in:
parent a7eb7ed5c6
commit be14b026e3
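The diff threads a new preScorePluginName parameter through initTestSchedulerForPriorityTest: the profile now disables every PreScore plugin with a {Name: "*"} wildcard entry and re-enables only the named one, if any. Tests whose plugin also implements PreScore (NodeAffinity, InterPodAffinity, PodTopologySpread) pass the plugin's name for both arguments, while TestImageLocalityScoring passes "" and leaves PreScore empty; TestPostFilterPlugin's profile gets the same PreScore wildcard.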
@@ -808,6 +808,11 @@ func TestPostFilterPlugin(t *testing.T) {
 						{Name: filterPluginName},
 					},
 				},
+				PreScore: configv1.PluginSet{
+					Disabled: []configv1.Plugin{
+						{Name: "*"},
+					},
+				},
 				Score: configv1.PluginSet{
 					Enabled: []configv1.Plugin{
 						{Name: scorePluginName},
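In the scheduler component-config API, a Disabled entry named "*" is a wildcard: it switches off every plugin that would otherwise run by default at that extension point, so only the explicit Enabled entries remain. A minimal sketch of the PluginSet shape introduced above, assuming configv1 is k8s.io/kube-scheduler/config/v1 and with a function name invented for illustration:

package scoring

import (
	configv1 "k8s.io/kube-scheduler/config/v1"
)

// pluginsUnderTest: all default PreScore plugins disabled via the "*"
// wildcard, exactly one Score plugin enabled.
func pluginsUnderTest(scorePluginName string) *configv1.Plugins {
	return &configv1.Plugins{
		PreScore: configv1.PluginSet{
			// "*" disables every default plugin at this extension point.
			Disabled: []configv1.Plugin{{Name: "*"}},
		},
		Score: configv1.PluginSet{
			Enabled: []configv1.Plugin{{Name: scorePluginName}},
		},
	}
}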
@@ -72,11 +72,16 @@ const (
 )
 
 // This file tests the scheduler priority functions.
-func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *testutils.TestContext {
-	cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{
+func initTestSchedulerForPriorityTest(t *testing.T, preScorePluginName, scorePluginName string) *testutils.TestContext {
+	cc := configv1.KubeSchedulerConfiguration{
 		Profiles: []configv1.KubeSchedulerProfile{{
 			SchedulerName: pointer.String(v1.DefaultSchedulerName),
 			Plugins: &configv1.Plugins{
+				PreScore: configv1.PluginSet{
+					Disabled: []configv1.Plugin{
+						{Name: "*"},
+					},
+				},
 				Score: configv1.PluginSet{
 					Enabled: []configv1.Plugin{
 						{Name: scorePluginName, Weight: pointer.Int32(1)},
@@ -87,7 +92,11 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *testutils.TestContext {
 				},
 			},
 		}},
-	})
+	}
+	if preScorePluginName != "" {
+		cc.Profiles[0].Plugins.PreScore.Enabled = append(cc.Profiles[0].Plugins.PreScore.Enabled, configv1.Plugin{Name: preScorePluginName})
+	}
+	cfg := configtesting.V1ToInternalWithDefaults(t, cc)
 	testCtx := testutils.InitTestSchedulerWithOptions(
 		t,
 		testutils.InitTestAPIServer(t, strings.ToLower(scorePluginName), nil),
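The two hunks above carry the actual fix: the versioned config is now built as a value (cc) so it can be patched before conversion, rather than being passed inline to V1ToInternalWithDefaults. A standalone sketch of that move; the import paths and the *config.KubeSchedulerConfiguration return type are assumptions based on the identifiers in the diff, and the helper name is invented:

package scoring

import (
	"testing"

	configv1 "k8s.io/kube-scheduler/config/v1"
	config "k8s.io/kubernetes/pkg/scheduler/apis/config"
	configtesting "k8s.io/kubernetes/pkg/scheduler/apis/config/testing"
)

// toInternalWithOptionalPreScore re-enables one PreScore plugin when a name
// is supplied, then converts to the internal config type. It assumes
// cc.Profiles[0].Plugins is already populated, as it is in the test helper.
func toInternalWithOptionalPreScore(t *testing.T, cc configv1.KubeSchedulerConfiguration, preScorePluginName string) *config.KubeSchedulerConfiguration {
	if preScorePluginName != "" {
		// An empty name (the ImageLocality test) leaves PreScore fully disabled.
		cc.Profiles[0].Plugins.PreScore.Enabled = append(
			cc.Profiles[0].Plugins.PreScore.Enabled,
			configv1.Plugin{Name: preScorePluginName},
		)
	}
	return configtesting.V1ToInternalWithDefaults(t, cc)
}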
@@ -201,7 +210,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 // TestNodeAffinityScoring verifies that scheduler's node affinity priority function
 // works correctly.
 func TestNodeAffinityScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name)
+	testCtx := initTestSchedulerForPriorityTest(t, nodeaffinity.Name, nodeaffinity.Name)
 	// Add a few nodes.
 	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode(), 4)
 	if err != nil {
@@ -320,7 +329,7 @@ func TestPodAffinityScoring(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name)
+			testCtx := initTestSchedulerForPriorityTest(t, interpodaffinity.Name, interpodaffinity.Name)
 			// Add a few nodes.
 			nodesInTopology, err := createAndWaitForNodesInCache(testCtx, "in-topology", st.MakeNode().Label(topologyKey, topologyValue), 5)
 			if err != nil {
@@ -364,7 +373,7 @@ func TestPodAffinityScoring(t *testing.T) {
 // TestImageLocalityScoring verifies that the scheduler's image locality priority function
 // works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
 func TestImageLocalityScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, imagelocality.Name)
+	testCtx := initTestSchedulerForPriorityTest(t, "", imagelocality.Name)
 
 	// Create a node with the large image.
 	// We use a fake large image as the test image used by the pod, which has
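Note the empty first argument in this hunk: ImageLocality implements only the Score extension point, so there is no PreScore plugin to re-enable and the profile's PreScore set stays fully disabled; that is presumably why this is the one call site passing "".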
@@ -596,7 +605,7 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, tt.enableNodeInclusionPolicy)()
 			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MatchLabelKeysInPodTopologySpread, tt.enableMatchLabelKeys)()
 
-			testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
+			testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name, podtopologyspread.Name)
 			cs := testCtx.ClientSet
 			ns := testCtx.NS.Name
 
@@ -646,7 +655,7 @@ func TestPodTopologySpreadScoring(t *testing.T) {
 // with the system default spreading spreads Pods belonging to a Service.
 // The setup has 300 nodes over 3 zones.
 func TestDefaultPodTopologySpreadScoring(t *testing.T) {
-	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
+	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name, podtopologyspread.Name)
 	cs := testCtx.ClientSet
 	ns := testCtx.NS.Name
 