DRA CEL: add benchmark
Expression evaluation is benchmarked for all scenarios in which compilation succeeds. A pending optimization in another PR caches compiled expressions, so the time spent on compilation will become less important; what matters is the actual evaluation.
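The new benchmark can be run on its own with the standard Go test flags (-run, -bench, -benchmem); the package path below is the assumed location of the DRA CEL compiler sources and may differ:

go test -run='^$' -bench=BenchmarkDeviceMatches -benchmem ./staging/src/k8s.io/dynamic-resource-allocation/cel/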
@@ -28,8 +28,7 @@ import (
 	"k8s.io/utils/ptr"
 )
 
-func TestCompile(t *testing.T) {
-	for name, scenario := range map[string]struct {
+var testcases = map[string]struct {
 	expression string
 	driver     string
 	attributes map[resourceapi.QualifiedName]resourceapi.DeviceAttribute
@@ -231,7 +230,10 @@ device.attributes["dra.example.com"]["version"].isGreaterThan(semver("0.0.1"))
 		driver:     "dra.example.com",
 		expectCost: 18446744073709551615, // Exceeds limit!
 	},
-} {
+}
+
+func TestCEL(t *testing.T) {
+	for name, scenario := range testcases {
 		t.Run(name, func(t *testing.T) {
 			_, ctx := ktesting.NewTestContext(t)
 			result := GetCompiler().CompileCELExpression(scenario.expression, environment.StoredExpressions)
@@ -284,3 +286,42 @@ device.attributes["dra.example.com"]["version"].isGreaterThan(semver("0.0.1"))
 		})
 	}
 }
+
+func BenchmarkDeviceMatches(b *testing.B) {
+	for name, scenario := range testcases {
+		if scenario.expectCompileError != "" {
+			continue
+		}
+		b.Run(name, func(b *testing.B) {
+			_, ctx := ktesting.NewTestContext(b)
+			result := GetCompiler().CompileCELExpression(scenario.expression, environment.StoredExpressions)
+			if result.Error != nil {
+				b.Fatalf("unexpected compile error: %s", result.Error.Error())
+			}
+
+			for i := 0; i < b.N; i++ {
+				// It would be nice to measure
+				// time/actual_cost, but the time as observed
+				// here also includes additional preparations
+				// in result.DeviceMatches and thus cannot be
+				// used.
+				match, _, err := result.DeviceMatches(ctx, Device{Attributes: scenario.attributes, Capacity: scenario.capacity, Driver: scenario.driver})
+				if err != nil {
+					if scenario.expectMatchError == "" {
+						b.Fatalf("unexpected evaluation error: %v", err)
+					}
+					if !strings.Contains(err.Error(), scenario.expectMatchError) {
+						b.Fatalf("expected evaluation error to contain %q, but got instead: %v", scenario.expectMatchError, err)
+					}
+					return
+				}
+				if scenario.expectMatchError != "" {
+					b.Fatalf("expected match error %q, got none", scenario.expectMatchError)
+				}
+				if match != scenario.expectMatch {
+					b.Fatalf("expected result %v, got %v", scenario.expectMatch, match)
+				}
+			}
+		})
+	}
+}
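The commit message mentions a pending optimization in another PR that caches compiled expressions. That change is not part of this diff; the following is only a minimal, self-contained sketch of what such memoization could look like, using hypothetical names (cachingCompiler, compileFunc) instead of the real DRA compiler API.

package main

import (
	"fmt"
	"sync"
)

// compileFunc stands in for an expensive compilation step such as the DRA
// compiler's CompileCELExpression; the real signature differs.
type compileFunc func(expression string) (string, error)

// cachingCompiler memoizes compilation results keyed by the expression
// source, so repeated evaluations of the same expression skip compilation.
type cachingCompiler struct {
	mutex   sync.Mutex
	cache   map[string]string
	compile compileFunc
}

func newCachingCompiler(compile compileFunc) *cachingCompiler {
	return &cachingCompiler{cache: map[string]string{}, compile: compile}
}

// Compile returns the cached result for expression if one exists and
// otherwise compiles, stores, and returns it.
func (c *cachingCompiler) Compile(expression string) (string, error) {
	c.mutex.Lock()
	defer c.mutex.Unlock()
	if compiled, ok := c.cache[expression]; ok {
		return compiled, nil
	}
	compiled, err := c.compile(expression)
	if err != nil {
		return "", err
	}
	c.cache[expression] = compiled
	return compiled, nil
}

func main() {
	calls := 0
	compiler := newCachingCompiler(func(expression string) (string, error) {
		calls++ // count how often the expensive path actually runs
		return "compiled(" + expression + ")", nil
	})
	for i := 0; i < 3; i++ {
		compiled, _ := compiler.Compile(`device.driver == "dra.example.com"`)
		fmt.Println(compiled)
	}
	fmt.Println("compile calls:", calls) // prints 1: the cache avoided recompilation
}

A production cache would also need a size bound or eviction policy; the actual PR may be structured quite differently.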