Merge pull request #130153 from wongchar/uncore-v1.33

node: cpumanager: prefer-align-cpus-by-uncorecache: add test cases and CPU topologies
Kubernetes Prow Robot 2025-03-07 08:45:45 -08:00 committed by GitHub
commit ed99f7dec6
2 changed files with 2369 additions and 0 deletions
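
For context (an editorial sketch, not part of the diff): the new test cases assert on uncore cache IDs rather than exact CPU IDs, relying on the topology's CPUDetails map and its UncoreCacheID field. The snippet below shows one way the fixtures' CPUs can be grouped per uncore cache; the helper name cpusPerUncoreCache and the import paths are assumptions based on the code under test, not something this PR adds.

package cpumanager // illustrative sketch only; not part of this change

import (
	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology" // assumed import path
	"k8s.io/utils/cpuset"                                   // assumed import path
)

// cpusPerUncoreCache (hypothetical helper) groups logical CPU IDs by the uncore
// cache they share, mirroring what the new assertions check via UncoreCacheID.
func cpusPerUncoreCache(topo *topology.CPUTopology) map[int]cpuset.CPUSet {
	byCache := make(map[int][]int)
	for cpuID, info := range topo.CPUDetails {
		byCache[info.UncoreCacheID] = append(byCache[info.UncoreCacheID], cpuID)
	}
	out := make(map[int]cpuset.CPUSet, len(byCache))
	for cacheID, cpus := range byCache {
		out[cacheID] = cpuset.New(cpus...)
	}
	return out
}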


@@ -22,6 +22,7 @@ import (
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
pkgfeatures "k8s.io/kubernetes/pkg/features"
@@ -973,6 +974,7 @@ type staticPolicyTestWithResvList struct {
expNewErr error
expCPUAlloc bool
expCSet cpuset.CPUSet
expUncoreCache cpuset.CPUSet // represents the expected UncoreCacheIDs
}
func TestStaticPolicyStartWithResvList(t *testing.T) {
@@ -1147,6 +1149,564 @@ func TestStaticPolicyAddWithResvList(t *testing.T) {
}
}
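// WithPodUID returns a deep copy of pod with its UID replaced by podUID, so each
// test case operates on state entries keyed by a distinct pod UID.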
func WithPodUID(pod *v1.Pod, podUID string) *v1.Pod {
ret := pod.DeepCopy()
ret.UID = types.UID(podUID)
return ret
}
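// TestStaticPolicyAddWithUncoreAlignment verifies that, with the
// prefer-align-cpus-by-uncorecache policy option enabled, Allocate() places each
// pod's CPUs on the expected uncore cache(s) across the new topology fixtures.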
func TestStaticPolicyAddWithUncoreAlignment(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.CPUManagerPolicyAlphaOptions, true)
testCases := []staticPolicyTestWithResvList{
{
description: "GuPodSingleContainerSaturating, DualSocketHTUncore, ExpectAllocOneUncore, FullUncoreAvail",
topo: topoDualSocketSingleNumaPerSocketSMTUncore,
numReservedCPUs: 8,
reserved: cpuset.New(0, 1, 96, 97, 192, 193, 288, 289), // note 4 cpus taken from uncore 0, 4 from uncore 12
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// remove partially used uncores from the available CPUs to simulate fully clean slate
stDefaultCPUSet: topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUs().Difference(
cpuset.New().Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(0),
).Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(12),
),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"16000m", "16000m"}, // CpusPerUncore=16 with this topology
},
),
"with-app-container-saturating",
),
expUncoreCache: cpuset.New(1),
},
{
description: "GuPodMainAndSidecarContainer, DualSocketHTUncore, ExpectAllocOneUncore, FullUncoreAvail",
topo: topoDualSocketSingleNumaPerSocketSMTUncore,
numReservedCPUs: 8,
reserved: cpuset.New(0, 1, 96, 97, 192, 193, 288, 289), // note 4 cpus taken from uncore 0, 4 from uncore 12
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// remove partially used uncores from the available CPUs to simulate fully clean slate
stDefaultCPUSet: topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUs().Difference(
cpuset.New().Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(0),
).Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(12),
),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"12000m", "12000m"},
{"2000m", "2000m"},
},
),
"with-app-container-and-sidecar",
),
expUncoreCache: cpuset.New(1),
},
{
description: "GuPodSidecarAndMainContainer, DualSocketHTUncore, ExpectAllocOneUncore, FullUncoreAvail",
topo: topoDualSocketSingleNumaPerSocketSMTUncore,
numReservedCPUs: 8,
reserved: cpuset.New(0, 1, 96, 97, 192, 193, 288, 289), // note 4 cpus taken from uncore 0, 4 from uncore 12
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// remove partially used uncores from the available CPUs to simulate fully clean slate
stDefaultCPUSet: topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUs().Difference(
cpuset.New().Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(0),
).Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(12),
),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"2000m", "2000m"},
{"12000m", "12000m"},
},
),
"with-sidecar-and-app-container",
),
expUncoreCache: cpuset.New(1),
},
{
description: "GuPodMainAndManySidecarContainer, DualSocketHTUncore, ExpectAllocOneUncore, FullUncoreAvail",
topo: topoDualSocketSingleNumaPerSocketSMTUncore,
numReservedCPUs: 8,
reserved: cpuset.New(0, 1, 96, 97, 192, 193, 288, 289), // note 4 cpus taken from uncore 0, 4 from uncore 12
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// remove partially used uncores from the available CPUs to simulate fully clean slate
stDefaultCPUSet: topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUs().Difference(
cpuset.New().Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(0),
).Union(
topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUsInUncoreCaches(12),
),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"10000m", "10000m"},
{"2000m", "2000m"},
{"2000m", "2000m"},
{"2000m", "2000m"},
},
),
"with-app-container-and-multi-sidecar",
),
expUncoreCache: cpuset.New(1),
},
{
description: "GuPodMainAndSidecarContainer, DualSocketHTUncore, ExpectAllocTwoUncore",
topo: topoDualSocketSingleNumaPerSocketSMTUncore,
numReservedCPUs: 8,
reserved: cpuset.New(0, 1, 96, 97, 192, 193, 288, 289), // note 4 cpus taken from uncore 0, 4 from uncore 12
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoDualSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"12000m", "12000m"},
{"2000m", "2000m"},
},
),
"with-app-container-and-sidecar",
),
expUncoreCache: cpuset.New(0, 1), // expected CPU alignment to UncoreCacheIDs 0-1
},
{
description: "GuPodSingleContainer, SingleSocketSMTSmallUncore, ExpectAllocOneUncore",
topo: topoSingleSocketSingleNumaPerSocketSMTSmallUncore,
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 64, 65), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"8000m", "8000m"},
},
),
"with-app-container-saturating",
),
expUncoreCache: cpuset.New(1),
},
{
// Best-effort policy allows larger containers to be scheduled using a packed method
description: "GuPodSingleContainer, SingleSocketSMTSmallUncore, ExpectAllocTwoUncore",
topo: topoSingleSocketSingleNumaPerSocketSMTSmallUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 64, 65), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// Uncore 1 fully allocated
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUs().Difference(
topoSingleSocketSingleNumaPerSocketSMTSmallUncore.CPUDetails.CPUsInUncoreCaches(1),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"12000m", "12000m"}, // larger than topology's uncore cache
},
),
"with-app-container-saturating",
),
expUncoreCache: cpuset.New(0, 2),
},
{
// Best-effort policy allows larger containers to be scheduled using a packed method
description: "GuPodSingleContainer, SingleSocketNoSMTSmallUncore, FragmentedUncore, ExpectAllocThreeUncore",
topo: topoSingleSocketSingleNumaPerSocketNoSMTSmallUncore, // 4 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 2, 3), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// Uncore 2, 3, and 5 fully allocated
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketNoSMTSmallUncore.CPUDetails.CPUs().Difference(
cpuset.New().Union(
topoSingleSocketSingleNumaPerSocketNoSMTSmallUncore.CPUDetails.CPUsInUncoreCaches(2),
).Union(
topoSingleSocketSingleNumaPerSocketNoSMTSmallUncore.CPUDetails.CPUsInUncoreCaches(3),
).Union(
topoSingleSocketSingleNumaPerSocketNoSMTSmallUncore.CPUDetails.CPUsInUncoreCaches(5),
),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"12000m", "12000m"}, // 3 uncore cache's worth of CPUs
},
),
"with-app-container-saturating",
),
expUncoreCache: cpuset.New(1, 4, 6),
},
{
// Uncore cache alignment following a packed methodology
description: "GuPodMultiContainer, DualSocketSMTUncore, FragmentedUncore, ExpectAllocOneUncore",
topo: topoSmallDualSocketSingleNumaPerSocketNoSMTUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 32, 33), // note 2 cpus taken from uncore 0, 2 from uncore 4
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// uncore 1 fully allocated
stDefaultCPUSet: topoSmallDualSocketSingleNumaPerSocketNoSMTUncore.CPUDetails.CPUs().Difference(
topoSmallDualSocketSingleNumaPerSocketNoSMTUncore.CPUDetails.CPUsInUncoreCaches(1),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"4000m", "4000m"},
{"2000m", "2000m"},
},
),
"with-multiple-container",
),
expUncoreCache: cpuset.New(0),
},
{
// Uncore cache alignment following a packed methodology
description: "GuPodMultiContainer, DualSocketSMTUncore, FragmentedUncore, ExpectAllocTwoUncore",
topo: topoSmallDualSocketSingleNumaPerSocketNoSMTUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 32, 33), // note 2 cpus taken from uncore 0, 2 from uncore 4
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// uncore 1 fully allocated
stDefaultCPUSet: topoSmallDualSocketSingleNumaPerSocketNoSMTUncore.CPUDetails.CPUs().Difference(
topoSmallDualSocketSingleNumaPerSocketNoSMTUncore.CPUDetails.CPUsInUncoreCaches(1),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"4000m", "4000m"},
{"4000m", "4000m"},
},
),
"with-multiple-container",
),
expUncoreCache: cpuset.New(0, 2),
},
{
// CPU assignments able to fit on partially available uncore cache
description: "GuPodMultiContainer, LargeSingleSocketSMTUncore, PartialUncoreFit, ExpectAllocTwoUncore",
topo: topoLargeSingleSocketSingleNumaPerSocketSMTUncore, // 16 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 128, 129), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// 4 cpus allocated from uncore 1
stDefaultCPUSet: topoLargeSingleSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUs().Difference(
cpuset.New(8, 9, 136, 137),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"12000m", "12000m"},
{"12000m", "12000m"},
},
),
"with-multiple-container",
),
expUncoreCache: cpuset.New(0, 1),
},
{
// CPU assignments unable to fit on partially available uncore cache
description: "GuPodMultiContainer, LargeSingleSocketSMTUncore, PartialUncoreNoFit, ExpectAllocTwoUncore",
topo: topoLargeSingleSocketSingleNumaPerSocketSMTUncore, // 16 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 128, 129), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// 4 cpus allocated from uncore 1
stDefaultCPUSet: topoLargeSingleSocketSingleNumaPerSocketSMTUncore.CPUDetails.CPUs().Difference(
cpuset.New(8, 9, 136, 137),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"14000m", "14000m"},
{"14000m", "14000m"},
},
),
"with-multiple-container",
),
expUncoreCache: cpuset.New(2, 3),
},
{
// Full NUMA allocation on split-cache architecture with NPS=2
description: "GuPodLargeSingleContainer, DualSocketNoSMTUncore, FullNUMAsAvail, ExpectAllocFullNUMA",
topo: topoDualSocketMultiNumaPerSocketUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 2, 3), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoDualSocketMultiNumaPerSocketUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"48000m", "48000m"}, // NUMA's worth of CPUs
},
),
"with-large-single-container",
),
expUncoreCache: cpuset.New(6, 7, 8, 9, 10, 11), // uncore caches of NUMA Node 1
},
{
// PreferAlignByUnCoreCacheOption will not impact monolithic x86 architectures
description: "GuPodSingleContainer, MonoUncoreCacheHT, ExpectAllocCPUSet",
topo: topoDualSocketSubNumaPerSocketHTMonolithicUncore, // Uncore cache CPUs = Socket CPUs
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 120, 121), // note 4 cpus taken from first 2 cores
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoDualSocketSubNumaPerSocketHTMonolithicUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"6000m", "6000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 4, 122, 123, 124),
expUncoreCache: cpuset.New(0),
},
{
// PreferAlignByUnCoreCacheOption on fragmented monolithic cache x86 architectures
description: "GuPodSingleContainer, MonoUncoreCacheHT, ExpectAllocCPUSet",
topo: topoSingleSocketSingleNumaPerSocketPCoreHTMonolithicUncore, // Uncore cache CPUs = Socket CPUs
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// CPUs 4-7 allocated
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketPCoreHTMonolithicUncore.CPUDetails.CPUs().Difference(
cpuset.New(4, 5, 6, 7),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"6000m", "6000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 8, 9, 16, 17), // identical to default packed assignment
expUncoreCache: cpuset.New(0),
},
{
// Compatibility with ARM-based split cache architectures
description: "GuPodSingleContainer, LargeSingleSocketUncore, ExpectAllocOneUncore",
topo: topoLargeSingleSocketSingleNumaPerSocketUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 2, 3), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
stDefaultCPUSet: topoLargeSingleSocketSingleNumaPerSocketUncore.CPUDetails.CPUs(),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"8000m", "8000m"},
},
),
"with-single-container",
),
expUncoreCache: cpuset.New(1),
},
{
// PreferAlignByUnCoreCacheOption on fragmented monolithic cache ARM architectures
description: "GuPodSingleContainer, MonoUncoreCacheHT, ExpectFragmentedAllocCPUSet",
topo: topoSingleSocketSingleNumaPerSocketUncore, // Uncore cache CPUs = Socket CPUs
numReservedCPUs: 2,
reserved: cpuset.New(0, 1),
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// CPUs 6-9, 12-15, 18-19 allocated
stDefaultCPUSet: topoSingleSocketSingleNumaPerSocketUncore.CPUDetails.CPUs().Difference(
cpuset.New().Union(
cpuset.New(6, 7, 8, 9),
).Union(
cpuset.New(12, 13, 14, 15),
).Union(
cpuset.New(18, 19),
),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"12000m", "12000m"},
},
),
"with-single-container",
),
expCPUAlloc: true,
expCSet: cpuset.New(2, 3, 4, 5, 10, 11, 16, 17, 20, 21, 22, 23), // identical to default packed assignment
expUncoreCache: cpuset.New(0),
},
{
// Best-effort policy can result in multiple uncore caches
// Every uncore cache is partially allocated
description: "GuPodSingleContainer, SingleSocketUncore, PartialUncore, ExpectBestEffortAllocTwoUncore",
topo: topoSmallSingleSocketSingleNumaPerSocketNoSMTUncore, // 8 cpus per uncore
numReservedCPUs: 4,
reserved: cpuset.New(0, 1, 2, 3), // note 4 cpus taken from uncore 0
cpuPolicyOptions: map[string]string{
FullPCPUsOnlyOption: "true",
PreferAlignByUnCoreCacheOption: "true",
},
stAssignments: state.ContainerCPUAssignments{},
// Every uncore has partially allocated 4 CPUs
stDefaultCPUSet: topoSmallSingleSocketSingleNumaPerSocketNoSMTUncore.CPUDetails.CPUs().Difference(
cpuset.New().Union(
cpuset.New(8, 9, 10, 11),
).Union(
cpuset.New(16, 17, 18, 19),
).Union(
cpuset.New(24, 25, 26, 27),
),
),
pod: WithPodUID(
makeMultiContainerPod(
[]struct{ request, limit string }{}, // init container
[]struct{ request, limit string }{ // app container
{"8000m", "8000m"}, // full uncore cache worth of cpus
},
),
"with-single-container",
),
expUncoreCache: cpuset.New(0, 1), // best-effort across uncore cache 0 and 1
},
}
for _, testCase := range testCases {
t.Run(testCase.description, func(t *testing.T) {
policy, err := NewStaticPolicy(testCase.topo, testCase.numReservedCPUs, testCase.reserved, topologymanager.NewFakeManager(), testCase.cpuPolicyOptions)
if err != nil {
t.Fatalf("NewStaticPolicy() failed with %v", err)
}
st := &mockState{
assignments: testCase.stAssignments,
defaultCPUSet: testCase.stDefaultCPUSet.Difference(testCase.reserved), // ensure the cpumanager invariant
}
for idx := range testCase.pod.Spec.Containers {
container := &testCase.pod.Spec.Containers[idx]
err := policy.Allocate(st, testCase.pod, container)
if err != nil {
t.Fatalf("Allocate failed: pod=%q container=%q", testCase.pod.UID, container.Name)
}
}
if testCase.expCPUAlloc {
container := &testCase.pod.Spec.Containers[0]
cset, found := st.assignments[string(testCase.pod.UID)][container.Name]
if !found {
t.Errorf("StaticPolicy Allocate() error (%v). expected container %v to be present in assignments %v",
testCase.description, container.Name, st.assignments)
}
if !testCase.expCSet.Equals(cset) {
t.Errorf("StaticPolicy Allocate() error (%v). expected CPUSet %v but got %v",
testCase.description, testCase.expCSet, cset)
}
return
}
uncoreCacheIDs, err := getPodUncoreCacheIDs(st, testCase.topo, testCase.pod)
if err != nil {
t.Fatalf("uncore cache check: %v", err.Error())
}
ids := cpuset.New(uncoreCacheIDs...)
if !ids.Equals(testCase.expUncoreCache) {
t.Errorf("StaticPolicy Allocate() error (%v). expected UncoreCacheIDs %v but got %v",
testCase.description, testCase.expUncoreCache, ids)
}
})
}
}
type staticPolicyOptionTestCase struct {
description string
policyOptions map[string]string
@@ -1274,3 +1834,22 @@ func newCPUSetPtr(cpus ...int) *cpuset.CPUSet {
ret := cpuset.New(cpus...)
return &ret
}
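// getPodUncoreCacheIDs returns the uncore cache ID of every CPU assigned to the
// pod's containers, letting tests assert cache alignment without pinning exact CPU IDs.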
func getPodUncoreCacheIDs(s state.Reader, topo *topology.CPUTopology, pod *v1.Pod) ([]int, error) {
var uncoreCacheIDs []int
for idx := range pod.Spec.Containers {
container := &pod.Spec.Containers[idx]
cset, ok := s.GetCPUSet(string(pod.UID), container.Name)
if !ok {
return nil, fmt.Errorf("GetCPUSet(%s, %s) not ok", pod.UID, container.Name)
}
for _, cpuID := range cset.UnsortedList() {
info, ok := topo.CPUDetails[cpuID]
if !ok {
return nil, fmt.Errorf("cpuID %v not in topo.CPUDetails", cpuID)
}
uncoreCacheIDs = append(uncoreCacheIDs, info.UncoreCacheID)
}
}
return uncoreCacheIDs, nil
}

File diff suppressed because it is too large