DRA scheduler: also pre-compute the unique ResourceSlice.NodeName

Converting a node's name to a unique string once and then comparing it against
the many already-unique ResourceSlice node names is faster than comparing the
underlying strings in memory.
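
For illustration only (not part of this commit), a minimal standalone sketch of
why this helps, assuming Go's unique package as used by the conversion code
below: interning turns the equality check into a single handle comparison
instead of a byte-wise string comparison.

    package main

    import (
        "fmt"
        "unique"
    )

    func main() {
        // Interning returns the same canonical handle for equal strings,
        // so == on the handles compares one pointer-sized word.
        a := unique.Make("worker-node-42") // illustrative node name
        b := unique.Make(fmt.Sprintf("worker-node-%d", 42))
        fmt.Println(a == b) // true, without touching the string bytes

        // Comparing the plain strings has to walk the bytes whenever the
        // backing arrays differ.
        s1, s2 := "worker-node-42", fmt.Sprintf("worker-node-%d", 42)
        fmt.Println(s1 == s2) // true, via a memory comparison
    }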

    goos: linux
    goarch: amd64
    pkg: k8s.io/kubernetes/test/integration/scheduler_perf
    cpu: Intel(R) Core(TM) i9-7980XE CPU @ 2.60GHz
                                                                                       │            before            │                       after                        │
                                                                                       │ SchedulingThroughput/Average │ SchedulingThroughput/Average  vs base              │
    PerfScheduling/SchedulingWithResourceClaimTemplateStructured/5000pods_500nodes-36                      36.65 ± 2%                     36.89 ± 2%       ~ (p=0.452 n=6)
    PerfScheduling/SteadyStateClusterResourceClaimTemplateStructured/empty_100nodes-36                     106.7 ± 3%                     105.7 ± 5%       ~ (p=0.701 n=6)
    PerfScheduling/SteadyStateClusterResourceClaimTemplateStructured/empty_500nodes-36                     119.7 ± 3%                     117.8 ± 3%       ~ (p=0.084 n=6)
    PerfScheduling/SteadyStateClusterResourceClaimTemplateStructured/half_100nodes-36                      121.1 ± 4%                     119.5 ± 4%       ~ (p=0.297 n=6)
    PerfScheduling/SteadyStateClusterResourceClaimTemplateStructured/half_500nodes-36                      63.72 ± 3%                     63.22 ± 2%       ~ (p=0.485 n=6)
    PerfScheduling/SteadyStateClusterResourceClaimTemplateStructured/full_100nodes-36                      110.2 ± 2%                     109.5 ± 2%       ~ (p=0.258 n=6)
    PerfScheduling/SteadyStateClusterResourceClaimTemplateStructured/full_500nodes-36                      28.16 ± 5%                     27.56 ± 5%       ~ (p=0.513 n=6)
    geomean                                                                                                73.15                          72.44       -0.98%

Author: Patrick Ohly
Date:   2024-09-05 18:41:34 +02:00
Parent: 814c9428fd
Commit: f070dd760c

5 changed files with 23 additions and 6 deletions

@@ -29,11 +29,19 @@ var (
 )
 
 func Convert_api_UniqueString_To_string(in *UniqueString, out *string, s conversion.Scope) error {
+	if *in == NullUniqueString {
+		*out = ""
+		return nil
+	}
 	*out = in.String()
 	return nil
 }
 
 func Convert_string_To_api_UniqueString(in *string, out *UniqueString, s conversion.Scope) error {
+	if *in == "" {
+		*out = NullUniqueString
+		return nil
+	}
 	*out = UniqueString(unique.Make(*in))
 	return nil
 }
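
To make these conversions concrete, here is a self-contained sketch that mirrors
the pattern; the UniqueString and NullUniqueString definitions are assumptions
inferred from the calls above, not the actual internal package. An empty node
name maps to the null handle and back, everything else round-trips through an
interned handle.

    package main

    import (
        "fmt"
        "unique"
    )

    // Assumed shape, based on the conversion functions above: UniqueString
    // wraps unique.Handle[string], and the zero value acts as NullUniqueString
    // for "no node name".
    type UniqueString unique.Handle[string]

    var NullUniqueString UniqueString

    func (s UniqueString) String() string {
        if s == NullUniqueString {
            return ""
        }
        return unique.Handle[string](s).Value()
    }

    func toUnique(s string) UniqueString {
        if s == "" {
            return NullUniqueString
        }
        return UniqueString(unique.Make(s))
    }

    func main() {
        fmt.Println(toUnique("worker-1").String())    // "worker-1": non-empty names round-trip
        fmt.Println(toUnique("") == NullUniqueString) // true: empty stays the null value
    }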

@@ -31,7 +31,7 @@ type ResourceSlice struct {
 type ResourceSliceSpec struct {
 	Driver UniqueString
 	Pool ResourcePool
-	NodeName string
+	NodeName UniqueString
 	NodeSelector *v1.NodeSelector
 	AllNodes bool
 	Devices []Device

@@ -246,7 +246,9 @@ func autoConvert_api_ResourceSliceSpec_To_v1alpha3_ResourceSliceSpec(in *Resourc
 	if err := Convert_api_ResourcePool_To_v1alpha3_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
 		return err
 	}
-	out.NodeName = in.NodeName
+	if err := Convert_api_UniqueString_To_string(&in.NodeName, &out.NodeName, s); err != nil {
+		return err
+	}
 	out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
 	out.AllNodes = in.AllNodes
 	if in.Devices != nil {
@@ -275,7 +277,9 @@ func autoConvert_v1alpha3_ResourceSliceSpec_To_api_ResourceSliceSpec(in *v1alpha
 	if err := Convert_v1alpha3_ResourcePool_To_api_ResourcePool(&in.Pool, &out.Pool, s); err != nil {
 		return err
 	}
-	out.NodeName = in.NodeName
+	if err := Convert_string_To_api_UniqueString(&in.NodeName, &out.NodeName, s); err != nil {
+		return err
+	}
 	out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
 	out.AllNodes = in.AllNodes
 	if in.Devices != nil {

@@ -839,7 +839,7 @@ func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.N
 	for i := range result {
 		slice := result[i].slice
-		if slice.Spec.NodeName != "" {
+		if slice.Spec.NodeName != draapi.NullUniqueString {
 			// At least one device is local to one node. This
 			// restricts the allocation to that node.
 			return &v1.NodeSelector{
@@ -847,7 +847,7 @@ func (alloc *allocator) createNodeSelector(result []internalDeviceResult) (*v1.N
 				MatchFields: []v1.NodeSelectorRequirement{{
 					Key: "metadata.name",
 					Operator: v1.NodeSelectorOpIn,
-					Values: []string{slice.Spec.NodeName},
+					Values: []string{slice.Spec.NodeName.String()},
 				}},
 			}},
 		}, nil
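
For context, a hedged sketch of what the selector built above does: the
matchFields term on metadata.name restricts scheduling to exactly the named
node, and it can be evaluated with the same nodeaffinity helper used in the
GatherPools hunk below. The node names and import paths here are illustrative
assumptions.

    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/component-helpers/scheduling/corev1/nodeaffinity"
    )

    func main() {
        // The same shape of selector that createNodeSelector returns for a
        // node-local slice: the allocation is pinned to one node by name.
        sel := &v1.NodeSelector{
            NodeSelectorTerms: []v1.NodeSelectorTerm{{
                MatchFields: []v1.NodeSelectorRequirement{{
                    Key:      "metadata.name",
                    Operator: v1.NodeSelectorOpIn,
                    Values:   []string{"worker-1"}, // illustrative node name
                }},
            }},
        }

        matcher, err := nodeaffinity.NewNodeSelector(sel)
        if err != nil {
            panic(err)
        }
        node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "worker-1"}}
        other := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "worker-2"}}
        fmt.Println(matcher.Match(node))  // true: the named node satisfies the selector
        fmt.Println(matcher.Match(other)) // false: every other node is excluded
    }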

@@ -35,11 +35,15 @@ import (
 // Both is recorded in the result.
 func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node *v1.Node) ([]*Pool, error) {
 	pools := make(map[PoolID]*Pool)
+	nodeName := ""
+	if node != nil {
+		nodeName = node.Name
+	}
 
 	for _, slice := range slices {
 		switch {
 		case slice.Spec.NodeName != "":
-			if slice.Spec.NodeName == node.Name {
+			if slice.Spec.NodeName == nodeName {
 				if err := addSlice(pools, slice); err != nil {
 					return nil, fmt.Errorf("add node slice %s: %w", slice.Name, err)
 				}
@@ -49,6 +53,7 @@ func GatherPools(ctx context.Context, slices []*resourceapi.ResourceSlice, node
 				return nil, fmt.Errorf("add cluster slice %s: %w", slice.Name, err)
 			}
 		case slice.Spec.NodeSelector != nil:
+			// TODO: move conversion into api.
 			selector, err := nodeaffinity.NewNodeSelector(slice.Spec.NodeSelector)
 			if err != nil {
 				return nil, fmt.Errorf("node selector in resource slice %s: %w", slice.Name, err)