DRA integration: more pods per node, more parallelism
Long-running tests like TestDRA/all/DeviceBindingConditions (42.50s) should run in parallel with other tests; otherwise the overall runtime is too high. Running more tests in parallel in turn requires allowing more pods per node, so that scheduling does not become the bottleneck.
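The change follows the standard Go subtest pattern: a subtest that calls Parallel() yields and runs alongside the other parallel subtests of the same parent, so wall-clock time approaches the slowest child rather than the sum. Below is a minimal, self-contained sketch of that pattern using the stock testing package rather than the ktesting.TContext wrapper seen in the hunks that follow; the subtest names and sleep durations are made up for illustration.

package dra_example

import (
	"testing"
	"time"
)

// TestAll mirrors the TestDRA structure: every child marks itself parallel,
// so a single slow child no longer serializes the whole run.
func TestAll(t *testing.T) {
	t.Run("SlowDeviceBindingConditions", func(t *testing.T) {
		t.Parallel()                       // overlaps with the other parallel siblings
		time.Sleep(200 * time.Millisecond) // stand-in for a long-running integration test
	})
	t.Run("QuickAdminAccess", func(t *testing.T) {
		t.Parallel()
		time.Sleep(10 * time.Millisecond)
	})
	// With both children parallel, total runtime is roughly that of the
	// slower child, not the sum of the two.
}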
@@ -42,6 +42,8 @@ var (
 // testDeviceBindingConditions is the entry point for running each integration test that verifies DeviceBindingConditions.
 // Some of these tests use device taints, and they assume that DRADeviceTaints is enabled.
 func testDeviceBindingConditions(tCtx ktesting.TContext, enabled bool) {
+	tCtx.Parallel()
+
 	tCtx.Run("BasicFlow", func(tCtx ktesting.TContext) { testDeviceBindingConditionsBasicFlow(tCtx, enabled) })
 	if enabled {
 		tCtx.Run("FailureTaints", func(tCtx ktesting.TContext) { testDeviceBindingFailureConditionsReschedule(tCtx, true) })
@@ -118,8 +118,11 @@ var (
 		Namespace(namespace).
 		RequestWithPrioritizedList(st.SubRequest("subreq-1", className, 1)).
 		Obj()
 )
 
-	numNodes = 8
+const (
+	numNodes       = 8
+	maxPodsPerNode = 5000 // This should never be the limiting factor, no matter how many tests run in parallel.
+)
 
 func TestDRA(t *testing.T) {
@@ -226,6 +229,8 @@ func TestDRA(t *testing.T) {
 				features.DRAExtendedResource: true,
 			},
 			f: func(tCtx ktesting.TContext) {
+				// These tests must run in parallel as much as possible to keep overall runtime low!
+
 				tCtx.Run("AdminAccess", func(tCtx ktesting.TContext) { testAdminAccess(tCtx, true) })
 				tCtx.Run("Convert", testConvert)
 				tCtx.Run("ControllerManagerMetrics", testControllerManagerMetrics)
@@ -301,7 +306,7 @@ func createNodes(tCtx ktesting.TContext) {
 			Capacity: v1.ResourceList{
 				v1.ResourceCPU:    resource.MustParse("100"),
 				v1.ResourceMemory: resource.MustParse("1000"),
-				v1.ResourcePods:   resource.MustParse("100"),
+				v1.ResourcePods:   *resource.NewScaledQuantity(maxPodsPerNode, 0),
 			},
 			Phase: v1.NodeRunning,
 			Conditions: []v1.NodeCondition{
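For the node capacity switch above: with a scale of 0, NewScaledQuantity builds the same quantity that MustParse would produce from the equivalent string, but it stays tied to the maxPodsPerNode constant instead of a hard-coded "100". A small standalone sketch against k8s.io/apimachinery; the constant value mirrors the new const block, everything else here is illustrative only and not part of the patch.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

const maxPodsPerNode = 5000

func main() {
	// NewScaledQuantity(value, 0) means value * 10^0, i.e. a plain integer quantity.
	fromConst := *resource.NewScaledQuantity(maxPodsPerNode, 0)
	fromString := resource.MustParse("5000")

	fmt.Println(fromConst.String())             // "5000"
	fmt.Println(fromConst.Cmp(fromString) == 0) // true: identical quantities
}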
@@ -478,6 +483,8 @@ func testConvert(tCtx ktesting.TContext) {
 // when the AdminAccess feature is enabled, it also checks that the field
 // is only allowed to be used in namespace with the Resource Admin Access label
 func testAdminAccess(tCtx ktesting.TContext, adminAccessEnabled bool) {
+	tCtx.Parallel()
+
 	namespace := createTestNamespace(tCtx, nil)
 	claim1 := claim.DeepCopy()
 	claim1.Namespace = namespace
@@ -1192,6 +1199,8 @@ func testPublishResourceSlices(tCtx ktesting.TContext, haveLatestAPI bool, disab
 //
 // When enabled, it tries server-side-apply (SSA) with different clients. This is what DRA drivers should be using.
 func testResourceClaimDeviceStatus(tCtx ktesting.TContext, enabled bool) {
+	tCtx.Parallel()
+
 	namespace := createTestNamespace(tCtx, nil)
 
 	claim := &resourceapi.ResourceClaim{
@@ -1386,7 +1395,8 @@ func testMaxResourceSlice(tCtx ktesting.TContext) {
 	}
 }
 
-// testControllerManagerMetrics tests ResourceClaim metrics
+// testControllerManagerMetrics tests ResourceClaim metrics.
+// It must run sequentially.
 func testControllerManagerMetrics(tCtx ktesting.TContext) {
 	namespace := createTestNamespace(tCtx, nil)
 	class, _ := createTestClass(tCtx, namespace)