Merge pull request #102015 from klueska/upstream-add-numa-to-cpu-assignment-algo

Add support for consuming whole NUMA nodes in CPUManager CPU assignments
commit 55e1d2f9a7 by Kubernetes Prow Robot, 2021-10-15 05:44:54 -07:00, committed by GitHub
5 changed files with 1009 additions and 93 deletions

View File

@@ -26,20 +26,138 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
type numaOrSocketsFirstFuncs interface {
takeFullFirstLevel()
takeFullSecondLevel()
sortAvailableNUMANodes() []int
sortAvailableSockets() []int
sortAvailableCores() []int
}
type numaFirst struct{ acc *cpuAccumulator }
type socketsFirst struct{ acc *cpuAccumulator }
var _ numaOrSocketsFirstFuncs = (*numaFirst)(nil)
var _ numaOrSocketsFirstFuncs = (*socketsFirst)(nil)
// If NUMA nodes are higher in the memory hierarchy than sockets, then we take
// from the set of NUMA Nodes as the first level.
func (n *numaFirst) takeFullFirstLevel() {
n.acc.takeFullNUMANodes()
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then we take
// from the set of sockets as the second level.
func (n *numaFirst) takeFullSecondLevel() {
n.acc.takeFullSockets()
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then just
// sort the NUMA nodes directly, and return them.
func (n *numaFirst) sortAvailableNUMANodes() []int {
numas := n.acc.details.NUMANodes().ToSliceNoSort()
n.acc.sort(numas, n.acc.details.CPUsInNUMANodes)
return numas
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then we need
// to pull the set of sockets out of each sorted NUMA node, and accumulate the
// partial order across them.
func (n *numaFirst) sortAvailableSockets() []int {
var result []int
for _, numa := range n.sortAvailableNUMANodes() {
sockets := n.acc.details.SocketsInNUMANodes(numa).ToSliceNoSort()
n.acc.sort(sockets, n.acc.details.CPUsInSockets)
result = append(result, sockets...)
}
return result
}
// If NUMA nodes are higher in the memory hierarchy than sockets, then
// cores sit directly below sockets in the memory hierarchy.
func (n *numaFirst) sortAvailableCores() []int {
var result []int
for _, socket := range n.acc.sortAvailableSockets() {
cores := n.acc.details.CoresInSockets(socket).ToSliceNoSort()
n.acc.sort(cores, n.acc.details.CPUsInCores)
result = append(result, cores...)
}
return result
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then we take
// from the set of sockets as the first level.
func (s *socketsFirst) takeFullFirstLevel() {
s.acc.takeFullSockets()
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then we take
// from the set of NUMA Nodes as the second level.
func (s *socketsFirst) takeFullSecondLevel() {
s.acc.takeFullNUMANodes()
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then we need
// to pull the set of NUMA nodes out of each sorted Socket, and accumulate the
// partial order across them.
func (s *socketsFirst) sortAvailableNUMANodes() []int {
var result []int
for _, socket := range s.sortAvailableSockets() {
numas := s.acc.details.NUMANodesInSockets(socket).ToSliceNoSort()
s.acc.sort(numas, s.acc.details.CPUsInNUMANodes)
result = append(result, numas...)
}
return result
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then just
// sort the sockets directly, and return them.
func (s *socketsFirst) sortAvailableSockets() []int {
sockets := s.acc.details.Sockets().ToSliceNoSort()
s.acc.sort(sockets, s.acc.details.CPUsInSockets)
return sockets
}
// If sockets are higher in the memory hierarchy than NUMA nodes, then cores
// sit directly below NUMA Nodes in the memory hierarchy.
func (s *socketsFirst) sortAvailableCores() []int {
var result []int
for _, numa := range s.acc.sortAvailableNUMANodes() {
cores := s.acc.details.CoresInNUMANodes(numa).ToSliceNoSort()
s.acc.sort(cores, s.acc.details.CPUsInCores)
result = append(result, cores...)
}
return result
}
type cpuAccumulator struct {
topo *topology.CPUTopology
details topology.CPUDetails
numCPUsNeeded int
result cpuset.CPUSet
numaOrSocketsFirst numaOrSocketsFirstFuncs
}
func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int) *cpuAccumulator {
acc := &cpuAccumulator{
topo: topo,
details: topo.CPUDetails.KeepOnly(availableCPUs),
numCPUsNeeded: numCPUs,
result: cpuset.NewCPUSet(),
}
if topo.NumSockets >= topo.NumNUMANodes {
acc.numaOrSocketsFirst = &numaFirst{acc}
} else {
acc.numaOrSocketsFirst = &socketsFirst{acc}
}
return acc
}
// Returns true if the supplied NUMANode is fully available in `topoDetails`.
func (a *cpuAccumulator) isNUMANodeFree(numaID int) bool {
return a.details.CPUsInNUMANodes(numaID).Size() == a.topo.CPUDetails.CPUsInNUMANodes(numaID).Size()
}
// Returns true if the supplied socket is fully available in `topoDetails`.
@@ -52,6 +170,17 @@ func (a *cpuAccumulator) isCoreFree(coreID int) bool {
return a.details.CPUsInCores(coreID).Size() == a.topo.CPUsPerCore()
}
// Returns free NUMA Node IDs as a slice sorted by sortAvailableNUMANodes().
func (a *cpuAccumulator) freeNUMANodes() []int {
free := []int{}
for _, numa := range a.sortAvailableNUMANodes() {
if a.isNUMANodeFree(numa) {
free = append(free, numa)
}
}
return free
}
// Returns free socket IDs as a slice sorted by sortAvailableSockets().
func (a *cpuAccumulator) freeSockets() []int {
free := []int{}
@@ -79,12 +208,12 @@ func (a *cpuAccumulator) freeCPUs() []int {
return a.sortAvailableCPUs()
}
// Sorts the provided list of NUMA nodes/sockets/cores/cpus referenced in 'ids'
// by the number of available CPUs contained within them (smallest to largest).
// The 'getCPUs()' parameter defines the function that should be called to
// retrieve the list of available CPUs for the type being referenced. If two
// NUMA nodes/sockets/cores/cpus have the same number of available CPUs, they
// are sorted in ascending order by their id.
func (a *cpuAccumulator) sort(ids []int, getCPUs func(ids ...int) cpuset.CPUSet) {
sort.Slice(ids,
func(i, j int) bool {
@@ -100,24 +229,19 @@ func (a *cpuAccumulator) sort(ids []int, getCPUs func(ids ...int) cpuset.CPUSet)
})
}
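// Example for sort() above (illustrative, not part of the patch): with free
// CPU counts of 2 on socket 0, 8 on socket 1, and 2 on socket 2, calling
// a.sort([]int{0, 1, 2}, a.details.CPUsInSockets) reorders the ids to
// [0, 2, 1]: fewest available CPUs first, ties broken by ascending id.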
// Sort all NUMA nodes with free CPUs.
func (a *cpuAccumulator) sortAvailableNUMANodes() []int {
return a.numaOrSocketsFirst.sortAvailableNUMANodes()
}
// Sort all sockets with free CPUs.
func (a *cpuAccumulator) sortAvailableSockets() []int {
return a.numaOrSocketsFirst.sortAvailableSockets()
}
// Sort all cores with free CPUs.
func (a *cpuAccumulator) sortAvailableCores() []int {
return a.numaOrSocketsFirst.sortAvailableCores()
}
// Sort all available CPUs:
@@ -139,6 +263,17 @@ func (a *cpuAccumulator) take(cpus cpuset.CPUSet) {
a.numCPUsNeeded -= cpus.Size()
}
func (a *cpuAccumulator) takeFullNUMANodes() {
for _, numa := range a.freeNUMANodes() {
cpusInNUMANode := a.topo.CPUDetails.CPUsInNUMANodes(numa)
if !a.needs(cpusInNUMANode.Size()) {
continue
}
klog.V(4).InfoS("takeFullNUMANodes: claiming NUMA node", "numa", numa)
a.take(cpusInNUMANode)
}
}
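// Worked example for takeFullNUMANodes above (illustrative, not part of the
// patch): with 20-CPU NUMA nodes and numCPUsNeeded=30, the first free node is
// claimed (leaving 10 CPUs needed) and all later nodes are skipped, because
// needs(20) no longer holds once fewer than 20 CPUs remain to be found.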
func (a *cpuAccumulator) takeFullSockets() {
for _, socket := range a.freeSockets() {
cpusInSocket := a.topo.CPUDetails.CPUsInSockets(socket)
@@ -193,9 +328,15 @@ func takeByTopology(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, num
}
// Algorithm: topology-aware best-fit
// 1. Acquire whole NUMA nodes and sockets, if available and the container
// requires at least a NUMA node or socket's-worth of CPUs. If NUMA
// Nodes map to 1 or more sockets, pull from NUMA nodes first.
// Otherwise pull from sockets first.
acc.numaOrSocketsFirst.takeFullFirstLevel()
if acc.isSatisfied() {
return acc.result, nil
}
acc.numaOrSocketsFirst.takeFullSecondLevel()
if acc.isSatisfied() {
return acc.result, nil
}
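The choice between the two levels is driven entirely by the relative counts of sockets and NUMA nodes on the machine. A minimal standalone sketch of that rule (the helper name firstLevel is illustrative and not part of the patch):

package main

import "fmt"

// firstLevel mirrors the selection in newCPUAccumulator: when each NUMA node
// spans one or more whole sockets (NumSockets >= NumNUMANodes), whole NUMA
// nodes are claimed first; otherwise whole sockets are claimed first.
func firstLevel(numSockets, numNUMANodes int) string {
	if numSockets >= numNUMANodes {
		return "NUMA nodes"
	}
	return "sockets"
}

func main() {
	fmt.Println(firstLevel(2, 4)) // dual Xeon Gold 6230 (sub-NUMA clustering): sockets first
	fmt.Println(firstLevel(4, 2)) // fake flipped topology in the tests: NUMA nodes first
}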

View File

@@ -18,6 +18,7 @@ package cpumanager
import (
"reflect"
"sort"
"testing"
"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
@@ -61,14 +62,217 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) {
cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{},
},
{
"dual socket, multi numa per socket, HT, 2 sockets free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "0-79"),
[]int{0, 1},
},
{
"dual socket, multi numa per socket, HT, 1 sockets free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-79"),
[]int{1},
},
{
"dual socket, multi numa per socket, HT, 0 sockets free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-78"),
[]int{},
},
{
"dual numa, multi socket per per socket, HT, 4 sockets free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "0-79"),
[]int{0, 1, 2, 3},
},
{
"dual numa, multi socket per per socket, HT, 3 sockets free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "0-19,21-79"),
[]int{0, 1, 3},
},
{
"dual numa, multi socket per per socket, HT, 2 sockets free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "0-59,61-78"),
[]int{0, 1},
},
{
"dual numa, multi socket per per socket, HT, 1 sockets free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "1-19,21-38,41-60,61-78"),
[]int{1},
},
{
"dual numa, multi socket per per socket, HT, 0 sockets free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "0-40,42-49,51-68,71-79"),
[]int{},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeSockets()
sort.Ints(result)
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("expected %v to equal %v", result, tc.expect)
}
})
}
}
func TestCPUAccumulatorFreeNUMANodes(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expect []int
}{
{
"single socket HT, 1 NUMA node free",
topoSingleSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7),
[]int{0},
},
{
"single socket HT, 0 NUMA Node free",
topoSingleSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 6, 7),
[]int{},
},
{
"dual socket HT, 2 NUMA Node free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
[]int{0, 1},
},
{
"dual socket HT, 1 NUMA Node free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{1},
},
{
"dual socket HT, 0 NUMA node free",
topoDualSocketHT,
cpuset.NewCPUSet(0, 2, 3, 4, 5, 6, 7, 8, 9, 11),
[]int{},
},
{
"dual socket, multi numa per socket, HT, 4 NUMA Node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "0-79"),
[]int{0, 1, 2, 3},
},
{
"dual socket, multi numa per socket, HT, 3 NUMA node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-79"),
[]int{1, 2, 3},
},
{
"dual socket, multi numa per socket, HT, 2 NUMA node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-9,11-79"),
[]int{2, 3},
},
{
"dual socket, multi numa per socket, HT, 1 NUMA node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-9,11-59,61-79"),
[]int{3},
},
{
"dual socket, multi numa per socket, HT, 0 NUMA node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-9,11-59,61-78"),
[]int{},
},
{
"dual numa, multi socket per per socket, HT, 2 NUMA node free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "0-79"),
[]int{0, 1},
},
{
"dual numa, multi socket per per socket, HT, 1 NUMA node free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "0-9,11-79"),
[]int{1},
},
{
"dual numa, multi socket per per socket, HT, 0 sockets free",
fakeTopoMultiSocketDualSocketPerNumaHT,
mustParseCPUSet(t, "0-9,11-59,61-79"),
[]int{},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeNUMANodes()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("expected %v to equal %v", result, tc.expect)
}
})
}
}
func TestCPUAccumulatorFreeSocketsAndNUMANodes(t *testing.T) {
testCases := []struct {
description string
topo *topology.CPUTopology
availableCPUs cpuset.CPUSet
expectSockets []int
expectNUMANodes []int
}{
{
"dual socket, multi numa per socket, HT, 2 Socket/4 NUMA Node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "0-79"),
[]int{0, 1},
[]int{0, 1, 2, 3},
},
{
"dual socket, multi numa per socket, HT, 1 Socket/3 NUMA node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-79"),
[]int{1},
[]int{1, 2, 3},
},
{
"dual socket, multi numa per socket, HT, 1 Socket/ 2 NUMA node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-9,11-79"),
[]int{1},
[]int{2, 3},
},
{
"dual socket, multi numa per socket, HT, 0 Socket/ 2 NUMA node free",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-59,61-79"),
[]int{},
[]int{1, 3},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
resultNUMANodes := acc.freeNUMANodes()
if !reflect.DeepEqual(resultNUMANodes, tc.expectNUMANodes) {
t.Errorf("expected NUMA Nodes %v to equal %v", resultNUMANodes, tc.expectNUMANodes)
}
resultSockets := acc.freeSockets()
if !reflect.DeepEqual(resultSockets, tc.expectSockets) {
t.Errorf("expected Sockets %v to equal %v", resultSockets, tc.expectSockets)
}
})
}
}
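One fake-topology case above is worth tracing by hand. On fakeTopoMultiSocketDualSocketPerNumaHT, socket 2 owns CPUs 20-29 and 60-69, so dropping only CPU 20 from the available set knocks just that socket out of the free list. A sketch reusing this file's helpers inside a test function:

acc := newCPUAccumulator(fakeTopoMultiSocketDualSocketPerNumaHT, mustParseCPUSet(t, "0-19,21-79"), 0)
free := acc.freeSockets() // socket 2 is missing CPU 20, so it is not fully free
sort.Ints(free)           // -> [0 1 3]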
@@ -130,11 +334,13 @@ func TestCPUAccumulatorFreeCores(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeCores()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("expected %v to equal %v", result, tc.expect)
}
})
}
}
@@ -184,11 +390,13 @@ func TestCPUAccumulatorFreeCPUs(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0)
result := acc.freeCPUs()
if !reflect.DeepEqual(result, tc.expect) {
t.Errorf("expected %v to equal %v", result, tc.expect)
}
})
}
}
@@ -268,31 +476,33 @@ func TestCPUAccumulatorTake(t *testing.T) {
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
acc := newCPUAccumulator(tc.topo, tc.availableCPUs, tc.numCPUs)
totalTaken := 0
for _, cpus := range tc.takeCPUs {
acc.take(cpus)
totalTaken += cpus.Size()
}
if tc.expectSatisfied != acc.isSatisfied() {
t.Errorf("expected acc.isSatisfied() to be %t", tc.expectSatisfied)
}
if tc.expectFailed != acc.isFailed() {
t.Errorf("expected acc.isFailed() to be %t", tc.expectFailed)
}
for _, cpus := range tc.takeCPUs {
availableCPUs := acc.details.CPUs()
if cpus.Intersection(availableCPUs).Size() > 0 {
t.Errorf("expected intersection of taken cpus [%s] and acc.details.CPUs() [%s] to be empty", cpus, availableCPUs)
}
if !cpus.IsSubsetOf(acc.result) {
t.Errorf("expected [%s] to be a subset of acc.result [%s]", cpus, acc.result)
}
}
expNumCPUsNeeded := tc.numCPUs - totalTaken
if acc.numCPUsNeeded != expNumCPUsNeeded {
t.Errorf("expected acc.numCPUsNeeded to be %d (got %d)", expNumCPUsNeeded, acc.numCPUsNeeded)
}
})
}
}
@@ -377,15 +587,65 @@ func TestTakeByTopology(t *testing.T) {
"",
cpuset.NewCPUSet(0, 2, 4, 6, 8, 10),
},
{
"take a socket of cpus from dual socket with multi-numa-per-socket with HT",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "0-79"),
40,
"",
mustParseCPUSet(t, "0-19,40-59"),
},
{
"take a NUMA node of cpus from dual socket with multi-numa-per-socket with HT",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "0-79"),
20,
"",
mustParseCPUSet(t, "0-9,40-49"),
},
{
"take a NUMA node of cpus from dual socket with multi-numa-per-socket with HT, with 1 NUMA node already taken",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "10-39,50-79"),
20,
"",
mustParseCPUSet(t, "10-19,50-59"),
},
{
"take a socket and a NUMA node of cpus from dual socket with multi-numa-per-socket with HT",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "0-79"),
60,
"",
mustParseCPUSet(t, "0-29,40-69"),
},
{
"take a socket and a NUMA node of cpus from dual socket with multi-numa-per-socket with HT, a core taken",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "1-39,41-79"), // reserve the first (phys) core (0,40)
60,
"",
mustParseCPUSet(t, "10-39,50-79"),
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
result, err := takeByTopology(tc.topo, tc.availableCPUs, tc.numCPUs)
if tc.expErr != "" && err.Error() != tc.expErr {
t.Errorf("expected error to be [%v] but it was [%v]", tc.expErr, err)
}
if !result.Equals(tc.expResult) {
t.Errorf("expected result [%s] to equal [%s]", result, tc.expResult)
}
})
}
}
func mustParseCPUSet(t *testing.T, s string) cpuset.CPUSet {
cpus, err := cpuset.Parse(s)
if err != nil {
t.Errorf("parsing %q: %v", s, err)
}
return cpus
}
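The 60-CPU expectation above ("0-29,40-69") decomposes exactly along the new two-level algorithm: on topoDualSocketMultiNumaPerSocketHT, socket 0 spans NUMA nodes 0 and 1 (CPUs 0-19,40-59), so one whole socket covers 40 of the 60 CPUs, and the remaining 20 come from the next free NUMA node, node 2 (CPUs 20-29,60-69). A sketch of the arithmetic with this file's helpers (variable names are illustrative):

sock0 := mustParseCPUSet(t, "0-19,40-59")  // whole socket 0: 40 CPUs
numa2 := mustParseCPUSet(t, "20-29,60-69") // whole NUMA node 2: 20 CPUs
want := sock0.Union(numa2)                 // "0-29,40-69", matching the test case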

View File

@@ -414,4 +414,200 @@ var (
282: {CoreID: 55, SocketID: 3, NUMANodeID: 3},
},
}
/*
Topology from dual xeon gold 6230; lscpu excerpt
CPU(s): 80
On-line CPU(s) list: 0-79
Thread(s) per core: 2
Core(s) per socket: 20
Socket(s): 2
NUMA node(s): 4
NUMA node0 CPU(s): 0-9,40-49
NUMA node1 CPU(s): 10-19,50-59
NUMA node2 CPU(s): 20-29,60-69
NUMA node3 CPU(s): 30-39,70-79
*/
topoDualSocketMultiNumaPerSocketHT = &topology.CPUTopology{
NumCPUs: 80,
NumSockets: 2,
NumCores: 40,
NumNUMANodes: 4,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
6: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
8: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
10: {CoreID: 10, SocketID: 0, NUMANodeID: 1},
11: {CoreID: 11, SocketID: 0, NUMANodeID: 1},
12: {CoreID: 12, SocketID: 0, NUMANodeID: 1},
13: {CoreID: 13, SocketID: 0, NUMANodeID: 1},
14: {CoreID: 14, SocketID: 0, NUMANodeID: 1},
15: {CoreID: 15, SocketID: 0, NUMANodeID: 1},
16: {CoreID: 16, SocketID: 0, NUMANodeID: 1},
17: {CoreID: 17, SocketID: 0, NUMANodeID: 1},
18: {CoreID: 18, SocketID: 0, NUMANodeID: 1},
19: {CoreID: 19, SocketID: 0, NUMANodeID: 1},
20: {CoreID: 20, SocketID: 1, NUMANodeID: 2},
21: {CoreID: 21, SocketID: 1, NUMANodeID: 2},
22: {CoreID: 22, SocketID: 1, NUMANodeID: 2},
23: {CoreID: 23, SocketID: 1, NUMANodeID: 2},
24: {CoreID: 24, SocketID: 1, NUMANodeID: 2},
25: {CoreID: 25, SocketID: 1, NUMANodeID: 2},
26: {CoreID: 26, SocketID: 1, NUMANodeID: 2},
27: {CoreID: 27, SocketID: 1, NUMANodeID: 2},
28: {CoreID: 28, SocketID: 1, NUMANodeID: 2},
29: {CoreID: 29, SocketID: 1, NUMANodeID: 2},
30: {CoreID: 30, SocketID: 1, NUMANodeID: 3},
31: {CoreID: 31, SocketID: 1, NUMANodeID: 3},
32: {CoreID: 32, SocketID: 1, NUMANodeID: 3},
33: {CoreID: 33, SocketID: 1, NUMANodeID: 3},
34: {CoreID: 34, SocketID: 1, NUMANodeID: 3},
35: {CoreID: 35, SocketID: 1, NUMANodeID: 3},
36: {CoreID: 36, SocketID: 1, NUMANodeID: 3},
37: {CoreID: 37, SocketID: 1, NUMANodeID: 3},
38: {CoreID: 38, SocketID: 1, NUMANodeID: 3},
39: {CoreID: 39, SocketID: 1, NUMANodeID: 3},
40: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
41: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
42: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
43: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
44: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
45: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
46: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
47: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
48: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
49: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
50: {CoreID: 10, SocketID: 0, NUMANodeID: 1},
51: {CoreID: 11, SocketID: 0, NUMANodeID: 1},
52: {CoreID: 12, SocketID: 0, NUMANodeID: 1},
53: {CoreID: 13, SocketID: 0, NUMANodeID: 1},
54: {CoreID: 14, SocketID: 0, NUMANodeID: 1},
55: {CoreID: 15, SocketID: 0, NUMANodeID: 1},
56: {CoreID: 16, SocketID: 0, NUMANodeID: 1},
57: {CoreID: 17, SocketID: 0, NUMANodeID: 1},
58: {CoreID: 18, SocketID: 0, NUMANodeID: 1},
59: {CoreID: 19, SocketID: 0, NUMANodeID: 1},
60: {CoreID: 20, SocketID: 1, NUMANodeID: 2},
61: {CoreID: 21, SocketID: 1, NUMANodeID: 2},
62: {CoreID: 22, SocketID: 1, NUMANodeID: 2},
63: {CoreID: 23, SocketID: 1, NUMANodeID: 2},
64: {CoreID: 24, SocketID: 1, NUMANodeID: 2},
65: {CoreID: 25, SocketID: 1, NUMANodeID: 2},
66: {CoreID: 26, SocketID: 1, NUMANodeID: 2},
67: {CoreID: 27, SocketID: 1, NUMANodeID: 2},
68: {CoreID: 28, SocketID: 1, NUMANodeID: 2},
69: {CoreID: 29, SocketID: 1, NUMANodeID: 2},
70: {CoreID: 30, SocketID: 1, NUMANodeID: 3},
71: {CoreID: 31, SocketID: 1, NUMANodeID: 3},
72: {CoreID: 32, SocketID: 1, NUMANodeID: 3},
73: {CoreID: 33, SocketID: 1, NUMANodeID: 3},
74: {CoreID: 34, SocketID: 1, NUMANodeID: 3},
75: {CoreID: 35, SocketID: 1, NUMANodeID: 3},
76: {CoreID: 36, SocketID: 1, NUMANodeID: 3},
77: {CoreID: 37, SocketID: 1, NUMANodeID: 3},
78: {CoreID: 38, SocketID: 1, NUMANodeID: 3},
79: {CoreID: 39, SocketID: 1, NUMANodeID: 3},
},
}
/*
FAKE Topology from dual xeon gold 6230
(see: topoDualSocketMultiNumaPerSocketHT).
We flip NUMA cells and Sockets to exercise the code.
TODO(fromanirh): replace with a real-world topology
once we find a suitable one.
*/
fakeTopoMultiSocketDualSocketPerNumaHT = &topology.CPUTopology{
NumCPUs: 80,
NumSockets: 4,
NumCores: 40,
NumNUMANodes: 2,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
6: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
8: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
10: {CoreID: 10, SocketID: 1, NUMANodeID: 0},
11: {CoreID: 11, SocketID: 1, NUMANodeID: 0},
12: {CoreID: 12, SocketID: 1, NUMANodeID: 0},
13: {CoreID: 13, SocketID: 1, NUMANodeID: 0},
14: {CoreID: 14, SocketID: 1, NUMANodeID: 0},
15: {CoreID: 15, SocketID: 1, NUMANodeID: 0},
16: {CoreID: 16, SocketID: 1, NUMANodeID: 0},
17: {CoreID: 17, SocketID: 1, NUMANodeID: 0},
18: {CoreID: 18, SocketID: 1, NUMANodeID: 0},
19: {CoreID: 19, SocketID: 1, NUMANodeID: 0},
20: {CoreID: 20, SocketID: 2, NUMANodeID: 1},
21: {CoreID: 21, SocketID: 2, NUMANodeID: 1},
22: {CoreID: 22, SocketID: 2, NUMANodeID: 1},
23: {CoreID: 23, SocketID: 2, NUMANodeID: 1},
24: {CoreID: 24, SocketID: 2, NUMANodeID: 1},
25: {CoreID: 25, SocketID: 2, NUMANodeID: 1},
26: {CoreID: 26, SocketID: 2, NUMANodeID: 1},
27: {CoreID: 27, SocketID: 2, NUMANodeID: 1},
28: {CoreID: 28, SocketID: 2, NUMANodeID: 1},
29: {CoreID: 29, SocketID: 2, NUMANodeID: 1},
30: {CoreID: 30, SocketID: 3, NUMANodeID: 1},
31: {CoreID: 31, SocketID: 3, NUMANodeID: 1},
32: {CoreID: 32, SocketID: 3, NUMANodeID: 1},
33: {CoreID: 33, SocketID: 3, NUMANodeID: 1},
34: {CoreID: 34, SocketID: 3, NUMANodeID: 1},
35: {CoreID: 35, SocketID: 3, NUMANodeID: 1},
36: {CoreID: 36, SocketID: 3, NUMANodeID: 1},
37: {CoreID: 37, SocketID: 3, NUMANodeID: 1},
38: {CoreID: 38, SocketID: 3, NUMANodeID: 1},
39: {CoreID: 39, SocketID: 3, NUMANodeID: 1},
40: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
41: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
42: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
43: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
44: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
45: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
46: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
47: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
48: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
49: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
50: {CoreID: 10, SocketID: 1, NUMANodeID: 0},
51: {CoreID: 11, SocketID: 1, NUMANodeID: 0},
52: {CoreID: 12, SocketID: 1, NUMANodeID: 0},
53: {CoreID: 13, SocketID: 1, NUMANodeID: 0},
54: {CoreID: 14, SocketID: 1, NUMANodeID: 0},
55: {CoreID: 15, SocketID: 1, NUMANodeID: 0},
56: {CoreID: 16, SocketID: 1, NUMANodeID: 0},
57: {CoreID: 17, SocketID: 1, NUMANodeID: 0},
58: {CoreID: 18, SocketID: 1, NUMANodeID: 0},
59: {CoreID: 19, SocketID: 1, NUMANodeID: 0},
60: {CoreID: 20, SocketID: 2, NUMANodeID: 1},
61: {CoreID: 21, SocketID: 2, NUMANodeID: 1},
62: {CoreID: 22, SocketID: 2, NUMANodeID: 1},
63: {CoreID: 23, SocketID: 2, NUMANodeID: 1},
64: {CoreID: 24, SocketID: 2, NUMANodeID: 1},
65: {CoreID: 25, SocketID: 2, NUMANodeID: 1},
66: {CoreID: 26, SocketID: 2, NUMANodeID: 1},
67: {CoreID: 27, SocketID: 2, NUMANodeID: 1},
68: {CoreID: 28, SocketID: 2, NUMANodeID: 1},
69: {CoreID: 29, SocketID: 2, NUMANodeID: 1},
70: {CoreID: 30, SocketID: 3, NUMANodeID: 1},
71: {CoreID: 31, SocketID: 3, NUMANodeID: 1},
72: {CoreID: 32, SocketID: 3, NUMANodeID: 1},
73: {CoreID: 33, SocketID: 3, NUMANodeID: 1},
74: {CoreID: 34, SocketID: 3, NUMANodeID: 1},
75: {CoreID: 35, SocketID: 3, NUMANodeID: 1},
76: {CoreID: 36, SocketID: 3, NUMANodeID: 1},
77: {CoreID: 37, SocketID: 3, NUMANodeID: 1},
78: {CoreID: 38, SocketID: 3, NUMANodeID: 1},
79: {CoreID: 39, SocketID: 3, NUMANodeID: 1},
},
}
)
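Fixtures this size are easy to get subtly wrong, so a consistency check along these lines can help (a hypothetical helper, not part of the file; it assumes the testing and topology imports already used here):

func checkTopology(t *testing.T, topo *topology.CPUTopology) {
	// Every logical CPU must have an entry, and the ID sets must match the counts.
	if len(topo.CPUDetails) != topo.NumCPUs {
		t.Errorf("CPUDetails has %d entries, want NumCPUs=%d", len(topo.CPUDetails), topo.NumCPUs)
	}
	sockets, numas := map[int]bool{}, map[int]bool{}
	for _, info := range topo.CPUDetails {
		sockets[info.SocketID] = true
		numas[info.NUMANodeID] = true
	}
	if len(sockets) != topo.NumSockets {
		t.Errorf("found %d sockets, want NumSockets=%d", len(sockets), topo.NumSockets)
	}
	if len(numas) != topo.NumNUMANodes {
		t.Errorf("found %d NUMA nodes, want NumNUMANodes=%d", len(numas), topo.NumNUMANodes)
	}
}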

View File

@@ -34,12 +34,14 @@ type CPUDetails map[int]CPUInfo
// CPUTopology contains details of node cpu, where :
// CPU - logical CPU, cadvisor - thread
// Core - physical CPU, cadvisor - Core
// Socket - socket, cadvisor - Socket
// NUMA Node - NUMA cell, cadvisor - Node
type CPUTopology struct {
NumCPUs int
NumCores int
NumSockets int
NumNUMANodes int
CPUDetails CPUDetails
}
// CPUsPerCore returns the number of logical CPUs associated with
@@ -243,16 +245,17 @@ func Discover(machineInfo *cadvisorapi.MachineInfo) (*CPUTopology, error) {
}
return &CPUTopology{
NumCPUs: machineInfo.NumCores,
NumSockets: machineInfo.NumSockets,
NumCores: numPhysicalCores,
NumNUMANodes: CPUDetails.NUMANodes().Size(),
CPUDetails: CPUDetails,
}, nil
}
// getUniqueCoreID computes coreId as the lowest cpuID
// for a given Threads []int slice. This will assure that coreID's are
// platform unique (opposite to what cAdvisor reports)
func getUniqueCoreID(threads []int) (coreID int, err error) {
if len(threads) == 0 {
return 0, fmt.Errorf("no cpus provided")

View File

@ -21,6 +21,7 @@ import (
"testing"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/google/go-cmp/cmp"
"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)
@@ -57,9 +58,10 @@ func Test_Discover(t *testing.T) {
},
},
want: &CPUTopology{
NumCPUs: 8,
NumSockets: 1,
NumCores: 4,
NumNUMANodes: 1,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
@@ -73,6 +75,318 @@
},
wantErr: false,
},
{
// dual xeon gold 6230
name: "DualSocketMultiNumaPerSocketHT",
machineInfo: cadvisorapi.MachineInfo{
NumCores: 80,
NumSockets: 2,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{SocketID: 0, Id: 0, Threads: []int{0, 40}},
{SocketID: 0, Id: 1, Threads: []int{1, 41}},
{SocketID: 0, Id: 2, Threads: []int{2, 42}},
{SocketID: 0, Id: 8, Threads: []int{3, 43}},
{SocketID: 0, Id: 9, Threads: []int{4, 44}},
{SocketID: 0, Id: 16, Threads: []int{5, 45}},
{SocketID: 0, Id: 17, Threads: []int{6, 46}},
{SocketID: 0, Id: 18, Threads: []int{7, 47}},
{SocketID: 0, Id: 24, Threads: []int{8, 48}},
{SocketID: 0, Id: 25, Threads: []int{9, 49}},
},
},
{Id: 1,
Cores: []cadvisorapi.Core{
{SocketID: 0, Id: 3, Threads: []int{10, 50}},
{SocketID: 0, Id: 4, Threads: []int{11, 51}},
{SocketID: 0, Id: 10, Threads: []int{12, 52}},
{SocketID: 0, Id: 11, Threads: []int{13, 53}},
{SocketID: 0, Id: 12, Threads: []int{14, 54}},
{SocketID: 0, Id: 19, Threads: []int{15, 55}},
{SocketID: 0, Id: 20, Threads: []int{16, 56}},
{SocketID: 0, Id: 26, Threads: []int{17, 57}},
{SocketID: 0, Id: 27, Threads: []int{18, 58}},
{SocketID: 0, Id: 28, Threads: []int{19, 59}},
},
},
{Id: 2,
Cores: []cadvisorapi.Core{
{SocketID: 1, Id: 0, Threads: []int{20, 60}},
{SocketID: 1, Id: 1, Threads: []int{21, 61}},
{SocketID: 1, Id: 2, Threads: []int{22, 62}},
{SocketID: 1, Id: 8, Threads: []int{23, 63}},
{SocketID: 1, Id: 9, Threads: []int{24, 64}},
{SocketID: 1, Id: 16, Threads: []int{25, 65}},
{SocketID: 1, Id: 17, Threads: []int{26, 66}},
{SocketID: 1, Id: 18, Threads: []int{27, 67}},
{SocketID: 1, Id: 24, Threads: []int{28, 68}},
{SocketID: 1, Id: 25, Threads: []int{29, 69}},
},
},
{Id: 3,
Cores: []cadvisorapi.Core{
{SocketID: 1, Id: 3, Threads: []int{30, 70}},
{SocketID: 1, Id: 4, Threads: []int{31, 71}},
{SocketID: 1, Id: 10, Threads: []int{32, 72}},
{SocketID: 1, Id: 11, Threads: []int{33, 73}},
{SocketID: 1, Id: 12, Threads: []int{34, 74}},
{SocketID: 1, Id: 19, Threads: []int{35, 75}},
{SocketID: 1, Id: 20, Threads: []int{36, 76}},
{SocketID: 1, Id: 26, Threads: []int{37, 77}},
{SocketID: 1, Id: 27, Threads: []int{38, 78}},
{SocketID: 1, Id: 28, Threads: []int{39, 79}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 80,
NumSockets: 2,
NumCores: 40,
NumNUMANodes: 4,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
6: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
8: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
10: {CoreID: 10, SocketID: 0, NUMANodeID: 1},
11: {CoreID: 11, SocketID: 0, NUMANodeID: 1},
12: {CoreID: 12, SocketID: 0, NUMANodeID: 1},
13: {CoreID: 13, SocketID: 0, NUMANodeID: 1},
14: {CoreID: 14, SocketID: 0, NUMANodeID: 1},
15: {CoreID: 15, SocketID: 0, NUMANodeID: 1},
16: {CoreID: 16, SocketID: 0, NUMANodeID: 1},
17: {CoreID: 17, SocketID: 0, NUMANodeID: 1},
18: {CoreID: 18, SocketID: 0, NUMANodeID: 1},
19: {CoreID: 19, SocketID: 0, NUMANodeID: 1},
20: {CoreID: 20, SocketID: 1, NUMANodeID: 2},
21: {CoreID: 21, SocketID: 1, NUMANodeID: 2},
22: {CoreID: 22, SocketID: 1, NUMANodeID: 2},
23: {CoreID: 23, SocketID: 1, NUMANodeID: 2},
24: {CoreID: 24, SocketID: 1, NUMANodeID: 2},
25: {CoreID: 25, SocketID: 1, NUMANodeID: 2},
26: {CoreID: 26, SocketID: 1, NUMANodeID: 2},
27: {CoreID: 27, SocketID: 1, NUMANodeID: 2},
28: {CoreID: 28, SocketID: 1, NUMANodeID: 2},
29: {CoreID: 29, SocketID: 1, NUMANodeID: 2},
30: {CoreID: 30, SocketID: 1, NUMANodeID: 3},
31: {CoreID: 31, SocketID: 1, NUMANodeID: 3},
32: {CoreID: 32, SocketID: 1, NUMANodeID: 3},
33: {CoreID: 33, SocketID: 1, NUMANodeID: 3},
34: {CoreID: 34, SocketID: 1, NUMANodeID: 3},
35: {CoreID: 35, SocketID: 1, NUMANodeID: 3},
36: {CoreID: 36, SocketID: 1, NUMANodeID: 3},
37: {CoreID: 37, SocketID: 1, NUMANodeID: 3},
38: {CoreID: 38, SocketID: 1, NUMANodeID: 3},
39: {CoreID: 39, SocketID: 1, NUMANodeID: 3},
40: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
41: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
42: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
43: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
44: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
45: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
46: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
47: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
48: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
49: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
50: {CoreID: 10, SocketID: 0, NUMANodeID: 1},
51: {CoreID: 11, SocketID: 0, NUMANodeID: 1},
52: {CoreID: 12, SocketID: 0, NUMANodeID: 1},
53: {CoreID: 13, SocketID: 0, NUMANodeID: 1},
54: {CoreID: 14, SocketID: 0, NUMANodeID: 1},
55: {CoreID: 15, SocketID: 0, NUMANodeID: 1},
56: {CoreID: 16, SocketID: 0, NUMANodeID: 1},
57: {CoreID: 17, SocketID: 0, NUMANodeID: 1},
58: {CoreID: 18, SocketID: 0, NUMANodeID: 1},
59: {CoreID: 19, SocketID: 0, NUMANodeID: 1},
60: {CoreID: 20, SocketID: 1, NUMANodeID: 2},
61: {CoreID: 21, SocketID: 1, NUMANodeID: 2},
62: {CoreID: 22, SocketID: 1, NUMANodeID: 2},
63: {CoreID: 23, SocketID: 1, NUMANodeID: 2},
64: {CoreID: 24, SocketID: 1, NUMANodeID: 2},
65: {CoreID: 25, SocketID: 1, NUMANodeID: 2},
66: {CoreID: 26, SocketID: 1, NUMANodeID: 2},
67: {CoreID: 27, SocketID: 1, NUMANodeID: 2},
68: {CoreID: 28, SocketID: 1, NUMANodeID: 2},
69: {CoreID: 29, SocketID: 1, NUMANodeID: 2},
70: {CoreID: 30, SocketID: 1, NUMANodeID: 3},
71: {CoreID: 31, SocketID: 1, NUMANodeID: 3},
72: {CoreID: 32, SocketID: 1, NUMANodeID: 3},
73: {CoreID: 33, SocketID: 1, NUMANodeID: 3},
74: {CoreID: 34, SocketID: 1, NUMANodeID: 3},
75: {CoreID: 35, SocketID: 1, NUMANodeID: 3},
76: {CoreID: 36, SocketID: 1, NUMANodeID: 3},
77: {CoreID: 37, SocketID: 1, NUMANodeID: 3},
78: {CoreID: 38, SocketID: 1, NUMANodeID: 3},
79: {CoreID: 39, SocketID: 1, NUMANodeID: 3},
},
},
wantErr: false,
},
{
// FAKE Topology from dual xeon gold 6230
// (see: dual xeon gold 6230).
// We flip NUMA cells and Sockets to exercise the code.
// TODO(fromanirh): replace with a real-world topology
// once we find a suitable one.
// Note: this is a fake topology. Thus, there is not a "correct"
// representation. This one was created following the these concepts:
// 1. be internally consistent (most important rule)
// 2. be as close as possible as existing HW topologies
// 3. if possible, minimize chances wrt existing HW topologies.
name: "DualNumaMultiSocketPerNumaHT",
machineInfo: cadvisorapi.MachineInfo{
NumCores: 80,
NumSockets: 4,
Topology: []cadvisorapi.Node{
{Id: 0,
Cores: []cadvisorapi.Core{
{SocketID: 0, Id: 0, Threads: []int{0, 40}},
{SocketID: 0, Id: 1, Threads: []int{1, 41}},
{SocketID: 0, Id: 2, Threads: []int{2, 42}},
{SocketID: 0, Id: 8, Threads: []int{3, 43}},
{SocketID: 0, Id: 9, Threads: []int{4, 44}},
{SocketID: 0, Id: 16, Threads: []int{5, 45}},
{SocketID: 0, Id: 17, Threads: []int{6, 46}},
{SocketID: 0, Id: 18, Threads: []int{7, 47}},
{SocketID: 0, Id: 24, Threads: []int{8, 48}},
{SocketID: 0, Id: 25, Threads: []int{9, 49}},
{SocketID: 1, Id: 3, Threads: []int{10, 50}},
{SocketID: 1, Id: 4, Threads: []int{11, 51}},
{SocketID: 1, Id: 10, Threads: []int{12, 52}},
{SocketID: 1, Id: 11, Threads: []int{13, 53}},
{SocketID: 1, Id: 12, Threads: []int{14, 54}},
{SocketID: 1, Id: 19, Threads: []int{15, 55}},
{SocketID: 1, Id: 20, Threads: []int{16, 56}},
{SocketID: 1, Id: 26, Threads: []int{17, 57}},
{SocketID: 1, Id: 27, Threads: []int{18, 58}},
{SocketID: 1, Id: 28, Threads: []int{19, 59}},
},
},
{Id: 1,
Cores: []cadvisorapi.Core{
{SocketID: 2, Id: 0, Threads: []int{20, 60}},
{SocketID: 2, Id: 1, Threads: []int{21, 61}},
{SocketID: 2, Id: 2, Threads: []int{22, 62}},
{SocketID: 2, Id: 8, Threads: []int{23, 63}},
{SocketID: 2, Id: 9, Threads: []int{24, 64}},
{SocketID: 2, Id: 16, Threads: []int{25, 65}},
{SocketID: 2, Id: 17, Threads: []int{26, 66}},
{SocketID: 2, Id: 18, Threads: []int{27, 67}},
{SocketID: 2, Id: 24, Threads: []int{28, 68}},
{SocketID: 2, Id: 25, Threads: []int{29, 69}},
{SocketID: 3, Id: 3, Threads: []int{30, 70}},
{SocketID: 3, Id: 4, Threads: []int{31, 71}},
{SocketID: 3, Id: 10, Threads: []int{32, 72}},
{SocketID: 3, Id: 11, Threads: []int{33, 73}},
{SocketID: 3, Id: 12, Threads: []int{34, 74}},
{SocketID: 3, Id: 19, Threads: []int{35, 75}},
{SocketID: 3, Id: 20, Threads: []int{36, 76}},
{SocketID: 3, Id: 26, Threads: []int{37, 77}},
{SocketID: 3, Id: 27, Threads: []int{38, 78}},
{SocketID: 3, Id: 28, Threads: []int{39, 79}},
},
},
},
},
want: &CPUTopology{
NumCPUs: 80,
NumSockets: 4,
NumCores: 40,
NumNUMANodes: 2,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
6: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
8: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
10: {CoreID: 10, SocketID: 1, NUMANodeID: 0},
11: {CoreID: 11, SocketID: 1, NUMANodeID: 0},
12: {CoreID: 12, SocketID: 1, NUMANodeID: 0},
13: {CoreID: 13, SocketID: 1, NUMANodeID: 0},
14: {CoreID: 14, SocketID: 1, NUMANodeID: 0},
15: {CoreID: 15, SocketID: 1, NUMANodeID: 0},
16: {CoreID: 16, SocketID: 1, NUMANodeID: 0},
17: {CoreID: 17, SocketID: 1, NUMANodeID: 0},
18: {CoreID: 18, SocketID: 1, NUMANodeID: 0},
19: {CoreID: 19, SocketID: 1, NUMANodeID: 0},
20: {CoreID: 20, SocketID: 2, NUMANodeID: 1},
21: {CoreID: 21, SocketID: 2, NUMANodeID: 1},
22: {CoreID: 22, SocketID: 2, NUMANodeID: 1},
23: {CoreID: 23, SocketID: 2, NUMANodeID: 1},
24: {CoreID: 24, SocketID: 2, NUMANodeID: 1},
25: {CoreID: 25, SocketID: 2, NUMANodeID: 1},
26: {CoreID: 26, SocketID: 2, NUMANodeID: 1},
27: {CoreID: 27, SocketID: 2, NUMANodeID: 1},
28: {CoreID: 28, SocketID: 2, NUMANodeID: 1},
29: {CoreID: 29, SocketID: 2, NUMANodeID: 1},
30: {CoreID: 30, SocketID: 3, NUMANodeID: 1},
31: {CoreID: 31, SocketID: 3, NUMANodeID: 1},
32: {CoreID: 32, SocketID: 3, NUMANodeID: 1},
33: {CoreID: 33, SocketID: 3, NUMANodeID: 1},
34: {CoreID: 34, SocketID: 3, NUMANodeID: 1},
35: {CoreID: 35, SocketID: 3, NUMANodeID: 1},
36: {CoreID: 36, SocketID: 3, NUMANodeID: 1},
37: {CoreID: 37, SocketID: 3, NUMANodeID: 1},
38: {CoreID: 38, SocketID: 3, NUMANodeID: 1},
39: {CoreID: 39, SocketID: 3, NUMANodeID: 1},
40: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
41: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
42: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
43: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
44: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
45: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
46: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
47: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
48: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
49: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
50: {CoreID: 10, SocketID: 1, NUMANodeID: 0},
51: {CoreID: 11, SocketID: 1, NUMANodeID: 0},
52: {CoreID: 12, SocketID: 1, NUMANodeID: 0},
53: {CoreID: 13, SocketID: 1, NUMANodeID: 0},
54: {CoreID: 14, SocketID: 1, NUMANodeID: 0},
55: {CoreID: 15, SocketID: 1, NUMANodeID: 0},
56: {CoreID: 16, SocketID: 1, NUMANodeID: 0},
57: {CoreID: 17, SocketID: 1, NUMANodeID: 0},
58: {CoreID: 18, SocketID: 1, NUMANodeID: 0},
59: {CoreID: 19, SocketID: 1, NUMANodeID: 0},
60: {CoreID: 20, SocketID: 2, NUMANodeID: 1},
61: {CoreID: 21, SocketID: 2, NUMANodeID: 1},
62: {CoreID: 22, SocketID: 2, NUMANodeID: 1},
63: {CoreID: 23, SocketID: 2, NUMANodeID: 1},
64: {CoreID: 24, SocketID: 2, NUMANodeID: 1},
65: {CoreID: 25, SocketID: 2, NUMANodeID: 1},
66: {CoreID: 26, SocketID: 2, NUMANodeID: 1},
67: {CoreID: 27, SocketID: 2, NUMANodeID: 1},
68: {CoreID: 28, SocketID: 2, NUMANodeID: 1},
69: {CoreID: 29, SocketID: 2, NUMANodeID: 1},
70: {CoreID: 30, SocketID: 3, NUMANodeID: 1},
71: {CoreID: 31, SocketID: 3, NUMANodeID: 1},
72: {CoreID: 32, SocketID: 3, NUMANodeID: 1},
73: {CoreID: 33, SocketID: 3, NUMANodeID: 1},
74: {CoreID: 34, SocketID: 3, NUMANodeID: 1},
75: {CoreID: 35, SocketID: 3, NUMANodeID: 1},
76: {CoreID: 36, SocketID: 3, NUMANodeID: 1},
77: {CoreID: 37, SocketID: 3, NUMANodeID: 1},
78: {CoreID: 38, SocketID: 3, NUMANodeID: 1},
79: {CoreID: 39, SocketID: 3, NUMANodeID: 1},
},
},
wantErr: false,
},
{
name: "DualSocketNoHT",
machineInfo: cadvisorapi.MachineInfo{
@@ -94,9 +408,10 @@
},
},
want: &CPUTopology{
NumCPUs: 4,
NumSockets: 2,
NumCores: 4,
NumNUMANodes: 2,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 1, NUMANodeID: 1},
@@ -129,9 +444,10 @@
},
},
want: &CPUTopology{
NumCPUs: 12,
NumSockets: 2,
NumCores: 6,
NumNUMANodes: 2,
CPUDetails: map[int]CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
@@ -199,8 +515,8 @@
}
return
}
if diff := cmp.Diff(got, tt.want); diff != "" {
t.Errorf("Discover() = %v, want %v diff=%s", got, tt.want, diff)
}
})
}
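Switching the final comparison from reflect.DeepEqual to go-cmp pays off on the new 80-CPU fixtures: cmp.Diff pinpoints the fields that differ instead of forcing a scan of two full %v dumps. go-cmp's documented convention for this (not a line from this patch) is:

if diff := cmp.Diff(tt.want, got); diff != "" {
	t.Errorf("Discover() mismatch (-want +got):\n%s", diff)
}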