Merge pull request #106599 from klueska/fix-numa-bug

Fix Bugs in CPUManager distribute NUMA policy option
Kubernetes Prow Robot 2021-12-10 04:41:12 -08:00 committed by GitHub
commit 1b0d83f1d6
3 changed files with 458 additions and 61 deletions

View File

@@ -45,27 +45,39 @@ func (m mapIntInt) Clone() mapIntInt {
}
func (m mapIntInt) Keys() []int {
keys := make([]int, len(m))
var keys []int
for k := range m {
keys = append(keys, k)
}
return keys
}
func (m mapIntInt) Values() []int {
values := make([]int, len(m))
for _, v := range m {
values = append(values, v)
func (m mapIntInt) Values(keys ...int) []int {
if keys == nil {
keys = m.Keys()
}
var values []int
for _, k := range keys {
values = append(values, m[k])
}
return values
}
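Two things are worth noting in this hunk. First, the old Keys() and Values() pre-sized their result with make([]int, len(m)) and then appended to it, so they returned len(m) spurious zero entries ahead of the real data; the new var declarations fix that. Second, Values is now variadic. A minimal, self-contained sketch of the new behaviour (mapIntInt, Keys, and Values are copied from the diff above; the sample data is made up):

package main

import "fmt"

type mapIntInt map[int]int

func (m mapIntInt) Keys() []int {
    var keys []int
    for k := range m {
        keys = append(keys, k)
    }
    return keys
}

func (m mapIntInt) Values(keys ...int) []int {
    if keys == nil {
        keys = m.Keys()
    }
    var values []int
    for _, k := range keys {
        values = append(values, m[k])
    }
    return values
}

func main() {
    m := mapIntInt{0: 8, 1: 6, 2: 4}
    fmt.Println(m.Values())     // values for all keys (map iteration order)
    fmt.Println(m.Values(1, 2)) // values for NUMA nodes 1 and 2 only: [6 4]
    // The allocator uses the second form to sum availability over a subset,
    // e.g. sum(availableAfterAllocation.Values(subset...)).
}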
func sum(xs []int) int {
var s int
for _, x := range xs {
s += x
}
return s
}
func mean(xs []int) float64 {
var sum float64
for _, x := range xs {
sum += float64(x)
}
return sum / float64(len(xs))
m := sum / float64(len(xs))
return math.Round(m*1000) / 1000
}
func standardDeviation(xs []int) float64 {
@@ -74,7 +86,8 @@ func standardDeviation(xs []int) float64 {
for _, x := range xs {
sum += (float64(x) - m) * (float64(x) - m)
}
return math.Sqrt(sum / float64(len(xs)))
s := math.Sqrt(sum / float64(len(xs)))
return math.Round(s*1000) / 1000
}
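A worked example of the new rounding (input chosen purely for illustration): mean([]int{1, 2, 2}) computes 5/3 = 1.6666..., and math.Round(1.6666...*1000)/1000 returns 1.667, i.e. the result is kept to three decimal places; standardDeviation now rounds its result the same way.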
func min(x, y int) int {
@@ -365,20 +378,25 @@ func (a *cpuAccumulator) takeRemainingCPUs() {
}
func (a *cpuAccumulator) rangeNUMANodesNeededToSatisfy(cpuGroupSize int) (int, int) {
// Get the total number of NUMA nodes in the system.
numNUMANodes := a.topo.CPUDetails.NUMANodes().Size()
// Get the total number of NUMA nodes that have CPUs available on them.
numNUMANodesAvailable := a.details.NUMANodes().Size()
// Get the total number of CPUs available across all NUMA nodes.
numCPUsAvailable := a.details.CPUs().Size()
// Get the total number of CPUs in the system.
numCPUs := a.topo.CPUDetails.CPUs().Size()
// Get the total number of 'cpuGroups' in the system.
numCPUGroups := (numCPUs-1)/cpuGroupSize + 1
// Calculate the number of 'cpuGroups' per NUMA Node in the system (rounding up).
numCPUGroupsPerNUMANode := (numCPUGroups-1)/numNUMANodes + 1
// Calculate the number of available 'cpuGroups' across all NUMA nodes as
// well as the number of 'cpuGroups' that need to be allocated (rounding up).
numCPUGroupsAvailable := (numCPUsAvailable-1)/cpuGroupSize + 1
numCPUGroupsNeeded := (a.numCPUsNeeded-1)/cpuGroupSize + 1
// Calculate the number of available 'cpuGroups' per NUMA Node (rounding up).
numCPUGroupsPerNUMANode := (numCPUGroupsAvailable-1)/numNUMANodesAvailable + 1
// Calculate the minimum number of numa nodes required to satisfy the
// allocation (rounding up).
minNUMAs := (numCPUGroupsNeeded-1)/numCPUGroupsPerNUMANode + 1
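A hypothetical walk through the revised arithmetic (the numbers are illustrative, not taken from the tests): with 40 CPUs still available across 4 NUMA nodes that have free CPUs, cpuGroupSize = 2, and a.numCPUsNeeded = 16, the integer ceiling divisions give
numCPUGroupsAvailable = (40-1)/2 + 1 = 20
numCPUGroupsNeeded = (16-1)/2 + 1 = 8
numCPUGroupsPerNUMANode = (20-1)/4 + 1 = 5
minNUMAs = (8-1)/5 + 1 = 2
so at least two NUMA nodes are required. The key change is that these counts are now derived from a.details (the CPUs actually still available) rather than from a.topo.CPUDetails (the full machine).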
@@ -532,6 +550,14 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
// important, for example, to ensure that all CPUs (i.e. all hyperthreads) from
// a single core are allocated together.
func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuGroupSize int) (cpuset.CPUSet, error) {
// If the number of CPUs requested cannot be handed out in chunks of
// 'cpuGroupSize', then we just call out the packing algorithm since we
// can't distribute CPUs in this chunk size.
if (numCPUs % cpuGroupSize) != 0 {
return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs)
}
// Otherwise build an accumulator to start allocating CPUs from.
acc := newCPUAccumulator(topo, availableCPUs, numCPUs)
if acc.isSatisfied() {
return acc.result, nil
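For example (a hypothetical request), asking for 3 CPUs with cpuGroupSize = 2 gives 3 % 2 == 1, so the request cannot be split into whole groups and is handed straight to takeByTopologyNUMAPacked; only requests that are whole multiples of cpuGroupSize continue into the distribution logic below.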
@@ -595,13 +621,16 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
}
// Calculate how many CPUs will be available on each NUMA node in
// 'combo' after allocating an even distribution of CPU groups of
// size 'cpuGroupSize' from them. This will be used in the "balance
// score" calculation to help decide if this combo should
// ultimately be chosen.
availableAfterAllocation := make(mapIntInt, len(combo))
// the system after allocating an even distribution of CPU groups
// of size 'cpuGroupSize' from each NUMA node in 'combo'. This will
// be used in the "balance score" calculation to help decide if
// this combo should ultimately be chosen.
availableAfterAllocation := make(mapIntInt, len(numas))
for _, numa := range numas {
availableAfterAllocation[numa] = acc.details.CPUsInNUMANodes(numa).Size()
}
for _, numa := range combo {
availableAfterAllocation[numa] = acc.details.CPUsInNUMANodes(numa).Size() - distribution
availableAfterAllocation[numa] -= distribution
}
// Check if there are any remaining CPUs to distribute across the
@@ -609,9 +638,20 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
// size 'cpuGroupSize'.
remainder := numCPUs - (distribution * len(combo))
// Get a list of NUMA nodes to consider pulling the remainder CPUs
// from. This list excludes NUMA nodes that don't have at least
// 'cpuGroupSize' CPUs available after being allocated
// 'distribution' number of CPUs.
var remainderCombo []int
for _, numa := range combo {
if availableAfterAllocation[numa] >= cpuGroupSize {
remainderCombo = append(remainderCombo, numa)
}
}
// Declare a set of local variables to help track the "balance
// scores" calculated when using different subsets of 'combo' to
// allocate remainder CPUs from.
// scores" calculated when using different subsets of
// 'remainderCombo' to allocate remainder CPUs from.
var bestLocalBalance float64 = math.MaxFloat64
var bestLocalRemainder []int = nil
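Illustrative numbers (not taken from the tests): if combo = {0, 1}, cpuGroupSize = 2, and after subtracting 'distribution' the map reads availableAfterAllocation = {0: 1, 1: 3, 2: 9, 3: 9}, then remainderCombo = {1}: node 0 has fewer than cpuGroupSize CPUs left and is filtered out, while nodes 2 and 3 are never candidates because they are outside 'combo'. Note that availableAfterAllocation is now seeded from every NUMA node in 'numas', so the balance score computed below reflects more than just the nodes in 'combo'.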
@@ -624,31 +664,54 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
}
// Otherwise, find the best "balance score" when allocating the
// remainder CPUs across different subsets of NUMA nodes in 'combo'.
// remainder CPUs across different subsets of NUMA nodes in 'remainderCombo'.
// These remainder CPUs are handed out in groups of size 'cpuGroupSize'.
acc.iterateCombinations(combo, remainder/cpuGroupSize, func(subset []int) LoopControl {
// Make a local copy of 'availableAfterAllocation'.
availableAfterAllocation := availableAfterAllocation.Clone()
// We start from k=len(remainderCombo) and walk down to k=1 so that
// we continue to distribute CPUs as much as possible across
// multiple NUMA nodes.
for k := len(remainderCombo); remainder > 0 && k >= 1; k-- {
acc.iterateCombinations(remainderCombo, k, func(subset []int) LoopControl {
// Make a local copy of 'remainder'.
remainder := remainder
// For all NUMA nodes in 'subset', remove another
// 'cpuGroupSize' number of CPUs (to account for any remainder
// CPUs that will be allocated on them).
for _, numa := range subset {
availableAfterAllocation[numa] -= cpuGroupSize
}
// Make a local copy of 'availableAfterAllocation'.
availableAfterAllocation := availableAfterAllocation.Clone()
// Calculate the "balance score" as the standard deviation of
// the number of CPUs available on all NUMA nodes in 'combo'
// after the remainder CPUs have been allocated across 'subset'
// in groups of size 'cpuGroupSize'.
balance := standardDeviation(availableAfterAllocation.Values())
if balance < bestLocalBalance {
bestLocalBalance = balance
bestLocalRemainder = subset
}
// If this subset is not capable of allocating all
// remainder CPUs, continue to the next one.
if sum(availableAfterAllocation.Values(subset...)) < remainder {
return Continue
}
return Continue
})
// For all NUMA nodes in 'subset', walk through them,
// removing 'cpuGroupSize' number of CPUs from each
// until all remainder CPUs have been accounted for.
for remainder > 0 {
for _, numa := range subset {
if remainder == 0 {
break
}
if availableAfterAllocation[numa] < cpuGroupSize {
continue
}
availableAfterAllocation[numa] -= cpuGroupSize
remainder -= cpuGroupSize
}
}
// Calculate the "balance score" as the standard deviation
// of the number of CPUs available on all NUMA nodes in the
// system after the remainder CPUs have been allocated
// across 'subset' in groups of size 'cpuGroupSize'.
balance := standardDeviation(availableAfterAllocation.Values())
if balance < bestLocalBalance {
bestLocalBalance = balance
bestLocalRemainder = subset
}
return Continue
})
}
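A hypothetical trace of the inner loop (numbers are illustrative): with remainder = 6, cpuGroupSize = 2, subset = {3, 7}, and a local availableAfterAllocation of {3: 4, 7: 2}, the subset passes the sum check (4 + 2 is not less than 6); the first pass then takes one group from node 3 (4 -> 2, remainder 4) and one from node 7 (2 -> 0, remainder 2), and the second pass takes a final group from node 3 (2 -> 0, remainder 0), at which point the loop stops. The balance score is the (rounded) standard deviation over the availability recorded for every NUMA node seeded above, not just the nodes in 'combo'.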
// If the best "balance score" for this combo is less than the
// lowest "balance score" of all previous combos, then update this
@@ -680,9 +743,19 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
// Then allocate any remaining CPUs in groups of size 'cpuGroupSize'
// from each NUMA node in the remainder set.
for _, numa := range bestRemainder {
cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize)
acc.take(cpus)
remainder := numCPUs - (distribution * len(bestCombo))
for remainder > 0 {
for _, numa := range bestRemainder {
if remainder == 0 {
break
}
if acc.details.CPUsInNUMANodes(numa).Size() < cpuGroupSize {
continue
}
cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize)
acc.take(cpus)
remainder -= cpuGroupSize
}
}
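This mirrors the scoring loop above: rather than taking exactly one group of cpuGroupSize CPUs from each NUMA node in bestRemainder, as the previous code did, the allocator now cycles through bestRemainder, skipping any node with fewer than cpuGroupSize free CPUs, until the entire remainder has been taken.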
// If we haven't allocated all of our CPUs at this point, then something

View File

@@ -573,14 +573,6 @@ func commonTakeByTopologyTestCases(t *testing.T) []takeByTopologyTestCase {
"",
cpuset.NewCPUSet(2, 6),
},
{
"take one cpu from dual socket with HT - core from Socket 0",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
"",
cpuset.NewCPUSet(2),
},
{
"take a socket of cpus from dual socket with HT",
topoDualSocketHT,
@@ -635,6 +627,14 @@ func commonTakeByTopologyTestCases(t *testing.T) []takeByTopologyTestCase {
func TestTakeByTopologyNUMAPacked(t *testing.T) {
testCases := commonTakeByTopologyTestCases(t)
testCases = append(testCases, []takeByTopologyTestCase{
{
"take one cpu from dual socket with HT - core from Socket 0",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
"",
cpuset.NewCPUSet(2),
},
{
"allocate 4 full cores with 3 coming from the first NUMA node (filling it up) and 1 coming from the second NUMA node",
topoDualSocketHT,
@@ -721,22 +721,22 @@ func commonTakeByTopologyExtendedTestCases(t *testing.T) []takeByTopologyExtende
mustParseCPUSet(t, "0-7,10-17,20-27,40-47,50-57,60-67"),
},
{
"allocate 24 full cores with 8 distributed across the first 3 NUMA nodes (filling the first NUMA node)",
"allocate 24 full cores with 8 distributed across the first 3 NUMA nodes (taking all but 2 from the first NUMA node)",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "2-39,42-79"),
mustParseCPUSet(t, "1-29,32-39,41-69,72-79"),
48,
1,
"",
mustParseCPUSet(t, "2-9,10-17,20-27,42-49,50-57,60-67"),
mustParseCPUSet(t, "1-8,10-17,20-27,41-48,50-57,60-67"),
},
{
"allocate 24 full cores with 8 distributed across the last 3 NUMA nodes (no room on the first NUMA node to distribute)",
"allocate 24 full cores with 8 distributed across the last 3 NUMA nodes (even though all 8 could be allocated from the first NUMA node)",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "3-39,43-79"),
mustParseCPUSet(t, "2-29,31-39,42-69,71-79"),
48,
1,
"",
mustParseCPUSet(t, "10-17,20-27,30-37,50-57,60-67,70-77"),
mustParseCPUSet(t, "10-17,20-27,31-38,50-57,60-67,71-78"),
},
{
"allocate 8 full cores with 2 distributed across each NUMA node",
@@ -764,6 +764,24 @@ func commonTakeByTopologyExtendedTestCases(t *testing.T) []takeByTopologyExtende
func TestTakeByTopologyNUMADistributed(t *testing.T) {
testCases := commonTakeByTopologyExtendedTestCases(t)
testCases = append(testCases, []takeByTopologyExtendedTestCase{
{
"take one cpu from dual socket with HT - core from Socket 0",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
1,
"",
cpuset.NewCPUSet(1),
},
{
"take one cpu from dual socket with HT - core from Socket 0 - cpuGroupSize 2",
topoDualSocketHT,
cpuset.NewCPUSet(1, 2, 3, 4, 5, 7, 8, 9, 10, 11),
1,
2,
"",
cpuset.NewCPUSet(2),
},
{
"allocate 13 full cores distributed across the first 2 NUMA nodes",
topoDualSocketMultiNumaPerSocketHT,
@@ -818,13 +836,37 @@ func TestTakeByTopologyNUMADistributed(t *testing.T) {
"",
mustParseCPUSet(t, "0-7,10-16,20-27,30-37,40-47,50-56,60-67,70-77"),
},
{
"ensure bestRemainder chosen with NUMA nodes that have enough CPUs to satisfy the request",
topoDualSocketMultiNumaPerSocketHT,
mustParseCPUSet(t, "0-3,10-13,20-23,30-36,40-43,50-53,60-63,70-76"),
34,
1,
"",
mustParseCPUSet(t, "0-3,10-13,20-23,30-34,40-43,50-53,60-63,70-74"),
},
{
"ensure previous failure encountered on live machine has been fixed (1/1)",
topoDualSocketMultiNumaPerSocketHTLarge,
mustParseCPUSet(t, "0,128,30,31,158,159,43-47,171-175,62,63,190,191,75-79,203-207,94,96,222,223,101-111,229-239,126,127,254,255"),
28,
1,
"",
mustParseCPUSet(t, "43-47,75-79,96,101-105,171-174,203-206,229-232"),
},
}...)
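The two new "take one cpu from dual socket with HT" cases exercise both sides of the new cpuGroupSize guard: with cpuGroupSize = 1 the request is distributed and CPU 1 is chosen, while with cpuGroupSize = 2 a single CPU cannot form a whole group (1 % 2 != 0), so the allocator falls back to the packed policy and returns CPU 2, matching the case moved into TestTakeByTopologyNUMAPacked above. The final case replays an allocation that previously failed on a live machine, using the large dual-socket EPYC topology fixture added below.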
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
result, err := takeByTopologyNUMADistributed(tc.topo, tc.availableCPUs, tc.numCPUs, tc.cpuGroupSize)
if tc.expErr != "" && err.Error() != tc.expErr {
t.Errorf("expected error to be [%v] but it was [%v]", tc.expErr, err)
if err != nil {
if tc.expErr == "" {
t.Errorf("unexpected error [%v]", err)
}
if tc.expErr != "" && err.Error() != tc.expErr {
t.Errorf("expected error to be [%v] but it was [%v]", tc.expErr, err)
}
return
}
if !result.Equals(tc.expResult) {
t.Errorf("expected result [%s] to equal [%s]", result, tc.expResult)

View File

@@ -610,4 +610,286 @@ var (
79: {CoreID: 39, SocketID: 3, NUMANodeID: 1},
},
}
/*
Topology from dual AMD EPYC 7742 64-Core Processor; lscpu excerpt
CPU(s): 256
On-line CPU(s) list: 0-255
Thread(s) per core: 2
Core(s) per socket: 64
Socket(s): 2
NUMA node(s): 8 (NPS=4)
NUMA node0 CPU(s): 0-15,128-143
NUMA node1 CPU(s): 16-31,144-159
NUMA node2 CPU(s): 32-47,160-175
NUMA node3 CPU(s): 48-63,176-191
NUMA node4 CPU(s): 64-79,192-207
NUMA node5 CPU(s): 80-95,208-223
NUMA node6 CPU(s): 96-111,224-239
NUMA node7 CPU(s): 112-127,240-255
*/
topoDualSocketMultiNumaPerSocketHTLarge = &topology.CPUTopology{
NumCPUs: 256,
NumSockets: 2,
NumCores: 128,
NumNUMANodes: 8,
CPUDetails: map[int]topology.CPUInfo{
0: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
1: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
2: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
3: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
4: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
5: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
6: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
7: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
8: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
9: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
10: {CoreID: 10, SocketID: 0, NUMANodeID: 0},
11: {CoreID: 11, SocketID: 0, NUMANodeID: 0},
12: {CoreID: 12, SocketID: 0, NUMANodeID: 0},
13: {CoreID: 13, SocketID: 0, NUMANodeID: 0},
14: {CoreID: 14, SocketID: 0, NUMANodeID: 0},
15: {CoreID: 15, SocketID: 0, NUMANodeID: 0},
16: {CoreID: 16, SocketID: 0, NUMANodeID: 1},
17: {CoreID: 17, SocketID: 0, NUMANodeID: 1},
18: {CoreID: 18, SocketID: 0, NUMANodeID: 1},
19: {CoreID: 19, SocketID: 0, NUMANodeID: 1},
20: {CoreID: 20, SocketID: 0, NUMANodeID: 1},
21: {CoreID: 21, SocketID: 0, NUMANodeID: 1},
22: {CoreID: 22, SocketID: 0, NUMANodeID: 1},
23: {CoreID: 23, SocketID: 0, NUMANodeID: 1},
24: {CoreID: 24, SocketID: 0, NUMANodeID: 1},
25: {CoreID: 25, SocketID: 0, NUMANodeID: 1},
26: {CoreID: 26, SocketID: 0, NUMANodeID: 1},
27: {CoreID: 27, SocketID: 0, NUMANodeID: 1},
28: {CoreID: 28, SocketID: 0, NUMANodeID: 1},
29: {CoreID: 29, SocketID: 0, NUMANodeID: 1},
30: {CoreID: 30, SocketID: 0, NUMANodeID: 1},
31: {CoreID: 31, SocketID: 0, NUMANodeID: 1},
32: {CoreID: 32, SocketID: 0, NUMANodeID: 2},
33: {CoreID: 33, SocketID: 0, NUMANodeID: 2},
34: {CoreID: 34, SocketID: 0, NUMANodeID: 2},
35: {CoreID: 35, SocketID: 0, NUMANodeID: 2},
36: {CoreID: 36, SocketID: 0, NUMANodeID: 2},
37: {CoreID: 37, SocketID: 0, NUMANodeID: 2},
38: {CoreID: 38, SocketID: 0, NUMANodeID: 2},
39: {CoreID: 39, SocketID: 0, NUMANodeID: 2},
40: {CoreID: 40, SocketID: 0, NUMANodeID: 2},
41: {CoreID: 41, SocketID: 0, NUMANodeID: 2},
42: {CoreID: 42, SocketID: 0, NUMANodeID: 2},
43: {CoreID: 43, SocketID: 0, NUMANodeID: 2},
44: {CoreID: 44, SocketID: 0, NUMANodeID: 2},
45: {CoreID: 45, SocketID: 0, NUMANodeID: 2},
46: {CoreID: 46, SocketID: 0, NUMANodeID: 2},
47: {CoreID: 47, SocketID: 0, NUMANodeID: 2},
48: {CoreID: 48, SocketID: 0, NUMANodeID: 3},
49: {CoreID: 49, SocketID: 0, NUMANodeID: 3},
50: {CoreID: 50, SocketID: 0, NUMANodeID: 3},
51: {CoreID: 51, SocketID: 0, NUMANodeID: 3},
52: {CoreID: 52, SocketID: 0, NUMANodeID: 3},
53: {CoreID: 53, SocketID: 0, NUMANodeID: 3},
54: {CoreID: 54, SocketID: 0, NUMANodeID: 3},
55: {CoreID: 55, SocketID: 0, NUMANodeID: 3},
56: {CoreID: 56, SocketID: 0, NUMANodeID: 3},
57: {CoreID: 57, SocketID: 0, NUMANodeID: 3},
58: {CoreID: 58, SocketID: 0, NUMANodeID: 3},
59: {CoreID: 59, SocketID: 0, NUMANodeID: 3},
60: {CoreID: 60, SocketID: 0, NUMANodeID: 3},
61: {CoreID: 61, SocketID: 0, NUMANodeID: 3},
62: {CoreID: 62, SocketID: 0, NUMANodeID: 3},
63: {CoreID: 63, SocketID: 0, NUMANodeID: 3},
64: {CoreID: 64, SocketID: 1, NUMANodeID: 4},
65: {CoreID: 65, SocketID: 1, NUMANodeID: 4},
66: {CoreID: 66, SocketID: 1, NUMANodeID: 4},
67: {CoreID: 67, SocketID: 1, NUMANodeID: 4},
68: {CoreID: 68, SocketID: 1, NUMANodeID: 4},
69: {CoreID: 69, SocketID: 1, NUMANodeID: 4},
70: {CoreID: 70, SocketID: 1, NUMANodeID: 4},
71: {CoreID: 71, SocketID: 1, NUMANodeID: 4},
72: {CoreID: 72, SocketID: 1, NUMANodeID: 4},
73: {CoreID: 73, SocketID: 1, NUMANodeID: 4},
74: {CoreID: 74, SocketID: 1, NUMANodeID: 4},
75: {CoreID: 75, SocketID: 1, NUMANodeID: 4},
76: {CoreID: 76, SocketID: 1, NUMANodeID: 4},
77: {CoreID: 77, SocketID: 1, NUMANodeID: 4},
78: {CoreID: 78, SocketID: 1, NUMANodeID: 4},
79: {CoreID: 79, SocketID: 1, NUMANodeID: 4},
80: {CoreID: 80, SocketID: 1, NUMANodeID: 5},
81: {CoreID: 81, SocketID: 1, NUMANodeID: 5},
82: {CoreID: 82, SocketID: 1, NUMANodeID: 5},
83: {CoreID: 83, SocketID: 1, NUMANodeID: 5},
84: {CoreID: 84, SocketID: 1, NUMANodeID: 5},
85: {CoreID: 85, SocketID: 1, NUMANodeID: 5},
86: {CoreID: 86, SocketID: 1, NUMANodeID: 5},
87: {CoreID: 87, SocketID: 1, NUMANodeID: 5},
88: {CoreID: 88, SocketID: 1, NUMANodeID: 5},
89: {CoreID: 89, SocketID: 1, NUMANodeID: 5},
90: {CoreID: 90, SocketID: 1, NUMANodeID: 5},
91: {CoreID: 91, SocketID: 1, NUMANodeID: 5},
92: {CoreID: 92, SocketID: 1, NUMANodeID: 5},
93: {CoreID: 93, SocketID: 1, NUMANodeID: 5},
94: {CoreID: 94, SocketID: 1, NUMANodeID: 5},
95: {CoreID: 95, SocketID: 1, NUMANodeID: 5},
96: {CoreID: 96, SocketID: 1, NUMANodeID: 6},
97: {CoreID: 97, SocketID: 1, NUMANodeID: 6},
98: {CoreID: 98, SocketID: 1, NUMANodeID: 6},
99: {CoreID: 99, SocketID: 1, NUMANodeID: 6},
100: {CoreID: 100, SocketID: 1, NUMANodeID: 6},
101: {CoreID: 101, SocketID: 1, NUMANodeID: 6},
102: {CoreID: 102, SocketID: 1, NUMANodeID: 6},
103: {CoreID: 103, SocketID: 1, NUMANodeID: 6},
104: {CoreID: 104, SocketID: 1, NUMANodeID: 6},
105: {CoreID: 105, SocketID: 1, NUMANodeID: 6},
106: {CoreID: 106, SocketID: 1, NUMANodeID: 6},
107: {CoreID: 107, SocketID: 1, NUMANodeID: 6},
108: {CoreID: 108, SocketID: 1, NUMANodeID: 6},
109: {CoreID: 109, SocketID: 1, NUMANodeID: 6},
110: {CoreID: 110, SocketID: 1, NUMANodeID: 6},
111: {CoreID: 111, SocketID: 1, NUMANodeID: 6},
112: {CoreID: 112, SocketID: 1, NUMANodeID: 7},
113: {CoreID: 113, SocketID: 1, NUMANodeID: 7},
114: {CoreID: 114, SocketID: 1, NUMANodeID: 7},
115: {CoreID: 115, SocketID: 1, NUMANodeID: 7},
116: {CoreID: 116, SocketID: 1, NUMANodeID: 7},
117: {CoreID: 117, SocketID: 1, NUMANodeID: 7},
118: {CoreID: 118, SocketID: 1, NUMANodeID: 7},
119: {CoreID: 119, SocketID: 1, NUMANodeID: 7},
120: {CoreID: 120, SocketID: 1, NUMANodeID: 7},
121: {CoreID: 121, SocketID: 1, NUMANodeID: 7},
122: {CoreID: 122, SocketID: 1, NUMANodeID: 7},
123: {CoreID: 123, SocketID: 1, NUMANodeID: 7},
124: {CoreID: 124, SocketID: 1, NUMANodeID: 7},
125: {CoreID: 125, SocketID: 1, NUMANodeID: 7},
126: {CoreID: 126, SocketID: 1, NUMANodeID: 7},
127: {CoreID: 127, SocketID: 1, NUMANodeID: 7},
128: {CoreID: 0, SocketID: 0, NUMANodeID: 0},
129: {CoreID: 1, SocketID: 0, NUMANodeID: 0},
130: {CoreID: 2, SocketID: 0, NUMANodeID: 0},
131: {CoreID: 3, SocketID: 0, NUMANodeID: 0},
132: {CoreID: 4, SocketID: 0, NUMANodeID: 0},
133: {CoreID: 5, SocketID: 0, NUMANodeID: 0},
134: {CoreID: 6, SocketID: 0, NUMANodeID: 0},
135: {CoreID: 7, SocketID: 0, NUMANodeID: 0},
136: {CoreID: 8, SocketID: 0, NUMANodeID: 0},
137: {CoreID: 9, SocketID: 0, NUMANodeID: 0},
138: {CoreID: 10, SocketID: 0, NUMANodeID: 0},
139: {CoreID: 11, SocketID: 0, NUMANodeID: 0},
140: {CoreID: 12, SocketID: 0, NUMANodeID: 0},
141: {CoreID: 13, SocketID: 0, NUMANodeID: 0},
142: {CoreID: 14, SocketID: 0, NUMANodeID: 0},
143: {CoreID: 15, SocketID: 0, NUMANodeID: 0},
144: {CoreID: 16, SocketID: 0, NUMANodeID: 1},
145: {CoreID: 17, SocketID: 0, NUMANodeID: 1},
146: {CoreID: 18, SocketID: 0, NUMANodeID: 1},
147: {CoreID: 19, SocketID: 0, NUMANodeID: 1},
148: {CoreID: 20, SocketID: 0, NUMANodeID: 1},
149: {CoreID: 21, SocketID: 0, NUMANodeID: 1},
150: {CoreID: 22, SocketID: 0, NUMANodeID: 1},
151: {CoreID: 23, SocketID: 0, NUMANodeID: 1},
152: {CoreID: 24, SocketID: 0, NUMANodeID: 1},
153: {CoreID: 25, SocketID: 0, NUMANodeID: 1},
154: {CoreID: 26, SocketID: 0, NUMANodeID: 1},
155: {CoreID: 27, SocketID: 0, NUMANodeID: 1},
156: {CoreID: 28, SocketID: 0, NUMANodeID: 1},
157: {CoreID: 29, SocketID: 0, NUMANodeID: 1},
158: {CoreID: 30, SocketID: 0, NUMANodeID: 1},
159: {CoreID: 31, SocketID: 0, NUMANodeID: 1},
160: {CoreID: 32, SocketID: 0, NUMANodeID: 2},
161: {CoreID: 33, SocketID: 0, NUMANodeID: 2},
162: {CoreID: 34, SocketID: 0, NUMANodeID: 2},
163: {CoreID: 35, SocketID: 0, NUMANodeID: 2},
164: {CoreID: 36, SocketID: 0, NUMANodeID: 2},
165: {CoreID: 37, SocketID: 0, NUMANodeID: 2},
166: {CoreID: 38, SocketID: 0, NUMANodeID: 2},
167: {CoreID: 39, SocketID: 0, NUMANodeID: 2},
168: {CoreID: 40, SocketID: 0, NUMANodeID: 2},
169: {CoreID: 41, SocketID: 0, NUMANodeID: 2},
170: {CoreID: 42, SocketID: 0, NUMANodeID: 2},
171: {CoreID: 43, SocketID: 0, NUMANodeID: 2},
172: {CoreID: 44, SocketID: 0, NUMANodeID: 2},
173: {CoreID: 45, SocketID: 0, NUMANodeID: 2},
174: {CoreID: 46, SocketID: 0, NUMANodeID: 2},
175: {CoreID: 47, SocketID: 0, NUMANodeID: 2},
176: {CoreID: 48, SocketID: 0, NUMANodeID: 3},
177: {CoreID: 49, SocketID: 0, NUMANodeID: 3},
178: {CoreID: 50, SocketID: 0, NUMANodeID: 3},
179: {CoreID: 51, SocketID: 0, NUMANodeID: 3},
180: {CoreID: 52, SocketID: 0, NUMANodeID: 3},
181: {CoreID: 53, SocketID: 0, NUMANodeID: 3},
182: {CoreID: 54, SocketID: 0, NUMANodeID: 3},
183: {CoreID: 55, SocketID: 0, NUMANodeID: 3},
184: {CoreID: 56, SocketID: 0, NUMANodeID: 3},
185: {CoreID: 57, SocketID: 0, NUMANodeID: 3},
186: {CoreID: 58, SocketID: 0, NUMANodeID: 3},
187: {CoreID: 59, SocketID: 0, NUMANodeID: 3},
188: {CoreID: 60, SocketID: 0, NUMANodeID: 3},
189: {CoreID: 61, SocketID: 0, NUMANodeID: 3},
190: {CoreID: 62, SocketID: 0, NUMANodeID: 3},
191: {CoreID: 63, SocketID: 0, NUMANodeID: 3},
192: {CoreID: 64, SocketID: 1, NUMANodeID: 4},
193: {CoreID: 65, SocketID: 1, NUMANodeID: 4},
194: {CoreID: 66, SocketID: 1, NUMANodeID: 4},
195: {CoreID: 67, SocketID: 1, NUMANodeID: 4},
196: {CoreID: 68, SocketID: 1, NUMANodeID: 4},
197: {CoreID: 69, SocketID: 1, NUMANodeID: 4},
198: {CoreID: 70, SocketID: 1, NUMANodeID: 4},
199: {CoreID: 71, SocketID: 1, NUMANodeID: 4},
200: {CoreID: 72, SocketID: 1, NUMANodeID: 4},
201: {CoreID: 73, SocketID: 1, NUMANodeID: 4},
202: {CoreID: 74, SocketID: 1, NUMANodeID: 4},
203: {CoreID: 75, SocketID: 1, NUMANodeID: 4},
204: {CoreID: 76, SocketID: 1, NUMANodeID: 4},
205: {CoreID: 77, SocketID: 1, NUMANodeID: 4},
206: {CoreID: 78, SocketID: 1, NUMANodeID: 4},
207: {CoreID: 79, SocketID: 1, NUMANodeID: 4},
208: {CoreID: 80, SocketID: 1, NUMANodeID: 5},
209: {CoreID: 81, SocketID: 1, NUMANodeID: 5},
210: {CoreID: 82, SocketID: 1, NUMANodeID: 5},
211: {CoreID: 83, SocketID: 1, NUMANodeID: 5},
212: {CoreID: 84, SocketID: 1, NUMANodeID: 5},
213: {CoreID: 85, SocketID: 1, NUMANodeID: 5},
214: {CoreID: 86, SocketID: 1, NUMANodeID: 5},
215: {CoreID: 87, SocketID: 1, NUMANodeID: 5},
216: {CoreID: 88, SocketID: 1, NUMANodeID: 5},
217: {CoreID: 89, SocketID: 1, NUMANodeID: 5},
218: {CoreID: 90, SocketID: 1, NUMANodeID: 5},
219: {CoreID: 91, SocketID: 1, NUMANodeID: 5},
220: {CoreID: 92, SocketID: 1, NUMANodeID: 5},
221: {CoreID: 93, SocketID: 1, NUMANodeID: 5},
222: {CoreID: 94, SocketID: 1, NUMANodeID: 5},
223: {CoreID: 95, SocketID: 1, NUMANodeID: 5},
224: {CoreID: 96, SocketID: 1, NUMANodeID: 6},
225: {CoreID: 97, SocketID: 1, NUMANodeID: 6},
226: {CoreID: 98, SocketID: 1, NUMANodeID: 6},
227: {CoreID: 99, SocketID: 1, NUMANodeID: 6},
228: {CoreID: 100, SocketID: 1, NUMANodeID: 6},
229: {CoreID: 101, SocketID: 1, NUMANodeID: 6},
230: {CoreID: 102, SocketID: 1, NUMANodeID: 6},
231: {CoreID: 103, SocketID: 1, NUMANodeID: 6},
232: {CoreID: 104, SocketID: 1, NUMANodeID: 6},
233: {CoreID: 105, SocketID: 1, NUMANodeID: 6},
234: {CoreID: 106, SocketID: 1, NUMANodeID: 6},
235: {CoreID: 107, SocketID: 1, NUMANodeID: 6},
236: {CoreID: 108, SocketID: 1, NUMANodeID: 6},
237: {CoreID: 109, SocketID: 1, NUMANodeID: 6},
238: {CoreID: 110, SocketID: 1, NUMANodeID: 6},
239: {CoreID: 111, SocketID: 1, NUMANodeID: 6},
240: {CoreID: 112, SocketID: 1, NUMANodeID: 7},
241: {CoreID: 113, SocketID: 1, NUMANodeID: 7},
242: {CoreID: 114, SocketID: 1, NUMANodeID: 7},
243: {CoreID: 115, SocketID: 1, NUMANodeID: 7},
244: {CoreID: 116, SocketID: 1, NUMANodeID: 7},
245: {CoreID: 117, SocketID: 1, NUMANodeID: 7},
246: {CoreID: 118, SocketID: 1, NUMANodeID: 7},
247: {CoreID: 119, SocketID: 1, NUMANodeID: 7},
248: {CoreID: 120, SocketID: 1, NUMANodeID: 7},
249: {CoreID: 121, SocketID: 1, NUMANodeID: 7},
250: {CoreID: 122, SocketID: 1, NUMANodeID: 7},
251: {CoreID: 123, SocketID: 1, NUMANodeID: 7},
252: {CoreID: 124, SocketID: 1, NUMANodeID: 7},
253: {CoreID: 125, SocketID: 1, NUMANodeID: 7},
254: {CoreID: 126, SocketID: 1, NUMANodeID: 7},
255: {CoreID: 127, SocketID: 1, NUMANodeID: 7},
},
}
)
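A note on reading the large fixture: logical CPUs 0-127 and 128-255 map onto the same CoreID values, so CPU n and CPU n+128 are the two hyperthreads of one core, consistent with the "Thread(s) per core: 2" line in the lscpu excerpt; each of the 8 NUMA nodes therefore holds 16 cores (32 hyperthreads).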