node: cpumgr: stricter precheck for full-pcpus-only

To implement the `full-pcpus-only` cpumanager policy option, we
leverage the existing CPU allocation algorithm. By design, it takes
CPUs from the biggest available chunk (socket or NUMA zone), then from
full physical cores, down to single cores.

Leveraging this, if the requested CPU count is a multiple of the SMT
level (commonly 2), we're guaranteed that only full physical cores
will be taken.
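
A minimal sketch of that precheck, in plain Go with an illustrative
name (`isSMTAligned` is not the kubelet's actual identifier), just to
make the rule concrete:

package main

import "fmt"

// isSMTAligned mirrors the naive precheck: a request passes if the CPU
// count is a whole multiple of the SMT level (threads per core).
func isSMTAligned(numCPUs, cpusPerCore int) bool {
	return numCPUs%cpusPerCore == 0
}

func main() {
	fmt.Println(isSMTAligned(6, 2)) // true: can be served as 3 full physical cores
	fmt.Println(isSMTAligned(5, 2)) // false: rejected with an SMT alignment error
}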

The hidden assumption is that this holds true by construction only if
the user reserved CPUs (if any) in terms of full physical cores.
In other words, if the user intentionally or mistakenly reserved single
threads which are not core siblings[1] of each other, then the simple
check we implemented is not sufficient.

An example can illustrate this better. Consider this setup:

cores: [(0, 4), (1, 5), (2, 6), (3, 8)] (in parens: thread siblings).
SMT level: 2 (each tuple is 2 elements)
Reserved CPUs: 0,1 (explicit pick using `--reserved-cpus`)

A container then requests 6 CPUs. The full-pcpus-only check passes:
6 % 2 == 0. The CPU allocator will first take the full cores (2, 6)
and (3, 8), and will then pick the remaining single CPUs. The
allocation will succeed, but it is incorrect.
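
The following standalone Go sketch reproduces this failure mode on the
example topology above. The data layout and allocation loop are purely
illustrative, not the kubelet's actual allocator code:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// thread siblings per physical core, matching the example setup
	cores := [][]int{{0, 4}, {1, 5}, {2, 6}, {3, 8}}
	reserved := map[int]bool{0: true, 1: true}

	// naive free set: everything except the reserved CPUs themselves
	free := map[int]bool{}
	for _, core := range cores {
		for _, cpu := range core {
			if !reserved[cpu] {
				free[cpu] = true
			}
		}
	}

	// mimic the allocator: take full cores first, then single threads
	request := 6
	var allocated []int
	for _, core := range cores {
		if len(allocated)+len(core) <= request && free[core[0]] && free[core[1]] {
			allocated = append(allocated, core...)
			delete(free, core[0])
			delete(free, core[1])
		}
	}
	for cpu := range free {
		if len(allocated) == request {
			break
		}
		allocated = append(allocated, cpu)
	}
	sort.Ints(allocated)

	// 6 % 2 == 0 passed the check, yet the result splits cores 0 and 1:
	// threads 4 and 5 are taken while their siblings (0 and 1) are reserved.
	fmt.Println(allocated) // [2 3 4 5 6 8]
}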

We can fix this case with a stricter precheck: additionally consider
all the core siblings of the reserved CPUs as unavailable when
computing the free CPUs, before starting the actual allocation. Doing
so, we fall back to the intended behavior, and by construction every
allocation whose CPU count is a multiple of the SMT level is correct
again.
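
The change below implements this by building a reservedPhysicalCPUs
superset and comparing the request against GetAvailablePhysicalCPUs.
Here is a standalone sketch of the same idea on the example topology,
using plain maps instead of the kubelet's cpuset and topology types:

package main

import "fmt"

func main() {
	cores := [][]int{{0, 4}, {1, 5}, {2, 6}, {3, 8}}
	reserved := map[int]bool{0: true, 1: true}

	// expand the reserved set to every thread sibling of a reserved CPU
	reservedPhysical := map[int]bool{}
	for _, core := range cores {
		touched := false
		for _, cpu := range core {
			if reserved[cpu] {
				touched = true
			}
		}
		if touched {
			for _, cpu := range core {
				reservedPhysical[cpu] = true
			}
		}
	}

	// count CPUs that remain usable as parts of full physical cores
	availablePhysicalCPUs := 0
	for _, core := range cores {
		for _, cpu := range core {
			if !reservedPhysical[cpu] {
				availablePhysicalCPUs++
			}
		}
	}

	// 6 % 2 == 0 still passes, but only cores 2 and 3 (4 CPUs) remain
	// fully free, so the request of 6 CPUs is now rejected up front.
	request := 6
	fmt.Println(request <= availablePhysicalCPUs) // false
}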

+++

[1] or thread siblings, in Linux parlance; in any case: hyperthread
siblings of the same physical core

Signed-off-by: Francesco Romani <fromani@redhat.com>
Francesco Romani 2022-11-23 18:09:19 +01:00
parent 7649afeffc
commit 0e9b92090c
4 changed files with 396 additions and 96 deletions

@@ -43,11 +43,15 @@ const (
// SMTAlignmentError represents an error due to SMT alignment
type SMTAlignmentError struct {
RequestedCPUs int
CpusPerCore int
RequestedCPUs int
CpusPerCore int
AvailablePhysicalCPUs int
}
func (e SMTAlignmentError) Error() string {
if e.AvailablePhysicalCPUs > 0 {
return fmt.Sprintf("SMT Alignment Error: not enough free physical CPUs: available physical CPUs = %d, requested CPUs = %d, CPUs per core = %d", e.AvailablePhysicalCPUs, e.RequestedCPUs, e.CpusPerCore)
}
return fmt.Sprintf("SMT Alignment Error: requested %d cpus not multiple cpus per core = %d", e.RequestedCPUs, e.CpusPerCore)
}
@@ -98,7 +102,13 @@ type staticPolicy struct {
// cpu socket topology
topology *topology.CPUTopology
// set of CPUs that is not available for exclusive assignment
reserved cpuset.CPUSet
reservedCPUs cpuset.CPUSet
// Superset of reservedCPUs. It includes not just the reservedCPUs themselves,
// but also any siblings of those reservedCPUs on the same physical die.
// NOTE: If the reserved set includes full physical CPUs from the beginning
// (e.g. only reserved pairs of core siblings) this set is expected to be
// identical to the reserved set.
reservedPhysicalCPUs cpuset.CPUSet
// topology manager reference to get container Topology affinity
affinity topologymanager.Store
// set of CPUs to reuse across allocations in a pod
@@ -150,8 +160,18 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
return nil, err
}
klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved)
policy.reserved = reserved
var reservedPhysicalCPUs cpuset.CPUSet
for _, cpu := range reserved.UnsortedList() {
core, err := topology.CPUCoreID(cpu)
if err != nil {
return nil, fmt.Errorf("[cpumanager] unable to build the reserved physical CPUs from the reserved set: %w", err)
}
reservedPhysicalCPUs = reservedPhysicalCPUs.Union(topology.CPUDetails.CPUsInCores(core))
}
klog.InfoS("Reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved, "reservedPhysicalCPUs", reservedPhysicalCPUs)
policy.reservedCPUs = reserved
policy.reservedPhysicalCPUs = reservedPhysicalCPUs
return policy, nil
}
@@ -187,9 +207,9 @@ func (p *staticPolicy) validateState(s state.State) error {
// 1. Check if the reserved cpuset is not part of default cpuset because:
// - kube/system reserved have changed (increased) - may lead to some containers not being able to start
// - user tampered with file
if !p.reserved.Intersection(tmpDefaultCPUset).Equals(p.reserved) {
if !p.reservedCPUs.Intersection(tmpDefaultCPUset).Equals(p.reservedCPUs) {
return fmt.Errorf("not all reserved cpus: \"%s\" are present in defaultCpuSet: \"%s\"",
p.reserved.String(), tmpDefaultCPUset.String())
p.reservedCPUs.String(), tmpDefaultCPUset.String())
}
// 2. Check if state for static policy is consistent
@@ -228,12 +248,16 @@ func (p *staticPolicy) validateState(s state.State) error {
// GetAllocatableCPUs returns the total set of CPUs available for allocation.
func (p *staticPolicy) GetAllocatableCPUs(s state.State) cpuset.CPUSet {
return p.topology.CPUDetails.CPUs().Difference(p.reserved)
return p.topology.CPUDetails.CPUs().Difference(p.reservedCPUs)
}
// GetAvailableCPUs returns the set of unassigned CPUs minus the reserved set.
func (p *staticPolicy) GetAvailableCPUs(s state.State) cpuset.CPUSet {
return s.GetDefaultCPUSet().Difference(p.reserved)
return s.GetDefaultCPUSet().Difference(p.reservedCPUs)
}
func (p *staticPolicy) GetAvailablePhysicalCPUs(s state.State) cpuset.CPUSet {
return s.GetDefaultCPUSet().Difference(p.reservedPhysicalCPUs)
}
func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, cset cpuset.CPUSet) {
@@ -276,19 +300,36 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
}
}()
if p.options.FullPhysicalCPUsOnly && ((numCPUs % p.topology.CPUsPerCore()) != 0) {
// Since CPU Manager has been enabled requesting strict SMT alignment, it means a guaranteed pod can only be admitted
// if the CPU requested is a multiple of the number of virtual cpus per physical cores.
// In case CPU request is not a multiple of the number of virtual cpus per physical cores the Pod will be put
// in Failed state, with SMTAlignmentError as reason. Since the allocation happens in terms of physical cores
// and the scheduler is responsible for ensuring that the workload goes to a node that has enough CPUs,
// the pod would be placed on a node where there are enough physical cores available to be allocated.
// Just like the behaviour in case of static policy, takeByTopology will try to first allocate CPUs from the same socket
// and only in case the request cannot be sattisfied on a single socket, CPU allocation is done for a workload to occupy all
// CPUs on a physical core. Allocation of individual threads would never have to occur.
return SMTAlignmentError{
RequestedCPUs: numCPUs,
CpusPerCore: p.topology.CPUsPerCore(),
if p.options.FullPhysicalCPUsOnly {
CPUsPerCore := p.topology.CPUsPerCore()
if (numCPUs % CPUsPerCore) != 0 {
// Since CPU Manager has been enabled requesting strict SMT alignment, it means a guaranteed pod can only be admitted
// if the CPU requested is a multiple of the number of virtual cpus per physical cores.
// In case CPU request is not a multiple of the number of virtual cpus per physical cores the Pod will be put
// in Failed state, with SMTAlignmentError as reason. Since the allocation happens in terms of physical cores
// and the scheduler is responsible for ensuring that the workload goes to a node that has enough CPUs,
// the pod would be placed on a node where there are enough physical cores available to be allocated.
// Just like the behaviour in case of static policy, takeByTopology will try to first allocate CPUs from the same socket
// and only in case the request cannot be sattisfied on a single socket, CPU allocation is done for a workload to occupy all
// CPUs on a physical core. Allocation of individual threads would never have to occur.
return SMTAlignmentError{
RequestedCPUs: numCPUs,
CpusPerCore: CPUsPerCore,
}
}
availablePhysicalCPUs := p.GetAvailablePhysicalCPUs(s).Size()
// It's legal to reserve CPUs which are not core siblings. In this case the CPU allocator can descend to single cores
// when picking CPUs. This will void the guarantee of FullPhysicalCPUsOnly. To prevent this, we need to additionally consider
// all the core siblings of the reserved CPUs as unavailable when computing the free CPUs, before to start the actual allocation.
// This way, by construction all possible CPUs allocation whose number is multiple of the SMT level are now correct again.
if numCPUs > availablePhysicalCPUs {
return SMTAlignmentError{
RequestedCPUs: numCPUs,
CpusPerCore: CPUsPerCore,
AvailablePhysicalCPUs: availablePhysicalCPUs,
}
}
}
if cpuset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {

@@ -36,6 +36,7 @@ type staticPolicyTest struct {
description string
topo *topology.CPUTopology
numReservedCPUs int
reservedCPUs *cpuset.CPUSet
podUID string
options map[string]string
containerName string
@@ -196,17 +197,6 @@ func TestStaticPolicyAdd(t *testing.T) {
// So we will permutate the options to ensure this holds true.
optionsInsensitiveTestCases := []staticPolicyTest{
{
description: "GuPodSingleCore, SingleSocketHT, ExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
description: "GuPodMultipleCores, SingleSocketHT, ExpectAllocOneCore",
topo: topoSingleSocketHT,
@@ -222,21 +212,6 @@ func TestStaticPolicyAdd(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.New(1, 5),
},
{
description: "GuPodMultipleCores, SingleSocketHT, ExpectSameAllocation",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer3": cpuset.New(2, 3, 6, 7),
},
},
stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "4000m", "4000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 6, 7),
},
{
description: "GuPodMultipleCores, DualSocketHT, ExpectAllocOneSocket",
topo: topoDualSocketHT,
@@ -334,36 +309,6 @@ func TestStaticPolicyAdd(t *testing.T) {
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
description: "GuPodMultipleCores, SingleSocketHT, NoAllocExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(1, 2, 3, 4, 5, 6),
},
},
stDefaultCPUSet: cpuset.New(0, 7),
pod: makePod("fakePod", "fakeContainer5", "2000m", "2000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
description: "GuPodMultipleCores, DualSocketHT, NoAllocExpectError",
topo: topoDualSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(1, 2, 3),
},
},
stDefaultCPUSet: cpuset.New(0, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer5", "10000m", "10000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
// All the CPUs from Socket 0 are available. Some CPUs from each
// Socket have been already assigned.
@@ -416,23 +361,6 @@ func TestStaticPolicyAdd(t *testing.T) {
expCPUAlloc: true,
expCSet: largeTopoSock1CPUSet.Union(cpuset.New(10, 34, 22, 47)),
},
{
// Only 7 CPUs are available.
// Pod requests 76 cores.
// Error is expected since available CPUs are less than the request.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, NoAlloc",
topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.New(10, 11, 53, 37, 55, 67, 52)),
},
},
stDefaultCPUSet: cpuset.New(10, 11, 53, 37, 55, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
}
// testcases for the default behaviour of the policy.
@@ -464,6 +392,79 @@ func TestStaticPolicyAdd(t *testing.T) {
expCPUAlloc: true,
expCSet: cpuset.New(10, 11, 53, 67, 52),
},
{
description: "GuPodSingleCore, SingleSocketHT, ExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(0, 1, 2, 3, 4, 5, 6, 7),
pod: makePod("fakePod", "fakeContainer2", "8000m", "8000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
description: "GuPodMultipleCores, SingleSocketHT, ExpectSameAllocation",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer3": cpuset.New(2, 3, 6, 7),
},
},
stDefaultCPUSet: cpuset.New(0, 1, 4, 5),
pod: makePod("fakePod", "fakeContainer3", "4000m", "4000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 6, 7),
},
{
description: "GuPodMultipleCores, DualSocketHT, NoAllocExpectError",
topo: topoDualSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(1, 2, 3),
},
},
stDefaultCPUSet: cpuset.New(0, 4, 5, 6, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainer5", "10000m", "10000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
description: "GuPodMultipleCores, SingleSocketHT, NoAllocExpectError",
topo: topoSingleSocketHT,
numReservedCPUs: 1,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": cpuset.New(1, 2, 3, 4, 5, 6),
},
},
stDefaultCPUSet: cpuset.New(0, 7),
pod: makePod("fakePod", "fakeContainer5", "2000m", "2000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
// Only 7 CPUs are available.
// Pod requests 76 cores.
// Error is expected since available CPUs are less than the request.
description: "GuPodMultipleCores, topoQuadSocketFourWayHT, NoAlloc",
topo: topoQuadSocketFourWayHT,
stAssignments: state.ContainerCPUAssignments{
"fakePod": map[string]cpuset.CPUSet{
"fakeContainer100": largeTopoCPUSet.Difference(cpuset.New(10, 11, 53, 37, 55, 67, 52)),
},
},
stDefaultCPUSet: cpuset.New(10, 11, 53, 37, 55, 67, 52),
pod: makePod("fakePod", "fakeContainer5", "76000m", "76000m"),
expErr: fmt.Errorf("not enough cpus available to satisfy request"),
expCPUAlloc: false,
expCSet: cpuset.New(),
},
}
// testcases for the FullPCPUsOnlyOption
@@ -497,6 +498,50 @@ func TestStaticPolicyAdd(t *testing.T) {
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
description: "GuPodManyCores, topoDualSocketHT, ExpectDoNotAllocPartialCPU",
topo: topoDualSocketHT,
options: map[string]string{
FullPCPUsOnlyOption: "true",
},
numReservedCPUs: 2,
reservedCPUs: newCPUSetPtr(1, 6),
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(0, 2, 3, 4, 5, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainerBug113537_1", "10000m", "10000m"),
expErr: SMTAlignmentError{RequestedCPUs: 10, CpusPerCore: 2, AvailablePhysicalCPUs: 8},
expCPUAlloc: false,
expCSet: cpuset.New(),
},
{
description: "GuPodManyCores, topoDualSocketHT, AutoReserve, ExpectAllocAllCPUs",
topo: topoDualSocketHT,
options: map[string]string{
FullPCPUsOnlyOption: "true",
},
numReservedCPUs: 2,
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainerBug113537_2", "10000m", "10000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
},
{
description: "GuPodManyCores, topoDualSocketHT, ExpectAllocAllCPUs",
topo: topoDualSocketHT,
options: map[string]string{
FullPCPUsOnlyOption: "true",
},
numReservedCPUs: 2,
reservedCPUs: newCPUSetPtr(0, 6),
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
pod: makePod("fakePod", "fakeContainerBug113537_2", "10000m", "10000m"),
expErr: nil,
expCPUAlloc: true,
expCSet: cpuset.New(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
},
}
newNUMAAffinity := func(bits ...int) bitmask.BitMask {
affinity, _ := bitmask.NewBitMask(bits...)
@@ -565,7 +610,11 @@ func runStaticPolicyTestCase(t *testing.T, testCase staticPolicyTest) {
if testCase.topologyHint != nil {
tm = topologymanager.NewFakeManagerWithHint(testCase.topologyHint)
}
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpuset.New(), tm, testCase.options)
cpus := cpuset.New()
if testCase.reservedCPUs != nil {
cpus = testCase.reservedCPUs.Clone()
}
policy, _ := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, cpus, tm, testCase.options)
st := &mockState{
assignments: testCase.stAssignments,
@@ -1093,3 +1142,8 @@ func TestStaticPolicyOptions(t *testing.T) {
})
}
}
func newCPUSetPtr(cpus ...int) *cpuset.CPUSet {
ret := cpuset.New(cpus...)
return &ret
}

@@ -62,6 +62,34 @@ func (topo *CPUTopology) CPUsPerSocket() int {
return topo.NumCPUs / topo.NumSockets
}
// CPUCoreID returns the physical core ID which the given logical CPU
// belongs to.
func (topo *CPUTopology) CPUCoreID(cpu int) (int, error) {
info, ok := topo.CPUDetails[cpu]
if !ok {
return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
}
return info.CoreID, nil
}
// CPUSocketID returns the socket ID which the given logical CPU belongs to.
func (topo *CPUTopology) CPUSocketID(cpu int) (int, error) {
info, ok := topo.CPUDetails[cpu]
if !ok {
return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
}
return info.SocketID, nil
}
// CPUNUMANodeID returns the NUMA node ID which the given logical CPU belongs to.
func (topo *CPUTopology) CPUNUMANodeID(cpu int) (int, error) {
info, ok := topo.CPUDetails[cpu]
if !ok {
return -1, fmt.Errorf("unknown CPU ID: %d", cpu)
}
return info.NUMANodeID, nil
}
// CPUInfo contains the NUMA, socket, and core IDs associated with a CPU.
type CPUInfo struct {
NUMANodeID int

@@ -923,3 +923,180 @@ func TestCPUDetailsCPUsInCores(t *testing.T) {
})
}
}
func TestCPUCoreID(t *testing.T) {
topoDualSocketHT := &CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
6: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
8: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
10: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
11: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
},
}
tests := []struct {
name string
topo *CPUTopology
id int
want int
wantErr bool
}{{
name: "Known Core ID",
topo: topoDualSocketHT,
id: 2,
want: 2,
}, {
name: "Known Core ID (core sibling).",
topo: topoDualSocketHT,
id: 8,
want: 2,
}, {
name: "Unknown Core ID.",
topo: topoDualSocketHT,
id: -2,
want: -1,
wantErr: true,
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tt.topo.CPUCoreID(tt.id)
gotErr := (err != nil)
if gotErr != tt.wantErr {
t.Errorf("CPUCoreID() returned err %v, want %v", gotErr, tt.wantErr)
}
if got != tt.want {
t.Errorf("CPUCoreID() returned %v, want %v", got, tt.want)
}
})
}
}
func TestCPUSocketID(t *testing.T) {
topoDualSocketHT := &CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
6: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
8: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
10: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
11: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
},
}
tests := []struct {
name string
topo *CPUTopology
id int
want int
wantErr bool
}{{
name: "Known Core ID",
topo: topoDualSocketHT,
id: 3,
want: 1,
}, {
name: "Known Core ID (core sibling).",
topo: topoDualSocketHT,
id: 9,
want: 1,
}, {
name: "Unknown Core ID.",
topo: topoDualSocketHT,
id: 1000,
want: -1,
wantErr: true,
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tt.topo.CPUSocketID(tt.id)
gotErr := (err != nil)
if gotErr != tt.wantErr {
t.Errorf("CPUSocketID() returned err %v, want %v", gotErr, tt.wantErr)
}
if got != tt.want {
t.Errorf("CPUSocketID() returned %v, want %v", got, tt.want)
}
})
}
}
func TestCPUNUMANodeID(t *testing.T) {
topoDualSocketHT := &CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
6: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
8: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 3, SocketID: 1, NUMANodeID: 1},
10: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
11: {CoreID: 5, SocketID: 1, NUMANodeID: 1},
},
}
tests := []struct {
name string
topo *CPUTopology
id int
want int
wantErr bool
}{{
name: "Known Core ID",
topo: topoDualSocketHT,
id: 0,
want: 0,
}, {
name: "Known Core ID (core sibling).",
topo: topoDualSocketHT,
id: 6,
want: 0,
}, {
name: "Unknown Core ID.",
topo: topoDualSocketHT,
id: 1000,
want: -1,
wantErr: true,
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := tt.topo.CPUNUMANodeID(tt.id)
gotErr := (err != nil)
if gotErr != tt.wantErr {
t.Errorf("CPUSocketID() returned err %v, want %v", gotErr, tt.wantErr)
}
if got != tt.want {
t.Errorf("CPUSocketID() returned %v, want %v", got, tt.want)
}
})
}
}