cpuset: Make 'ToSlice*' methods look like 'set' methods
In 'set', conversions to slice are done also, but with different names:

    ToSliceNoSort() -> UnsortedList()
    ToSlice()       -> List()

Reimplement List() in terms of UnsortedList to save some duplication.
This commit is contained in:
parent a0c989b99a
commit e5143d16c2
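For context, a minimal sketch of how the renamed accessors read at a call site, assuming the in-tree cpuset package touched by this commit (import path and CPU IDs are illustrative, not part of the change):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// Hypothetical set of CPUs; the IDs are made up for illustration.
	cpus := cpuset.NewCPUSet(7, 2, 5)

	// Before this commit: cpus.ToSlice() / cpus.ToSliceNoSort()
	// After this commit:  cpus.List()    / cpus.UnsortedList()
	fmt.Println(cpus.List())         // sorted: [2 5 7]
	fmt.Println(cpus.UnsortedList()) // any order; skips the sorting cost
}
```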
@@ -982,14 +982,14 @@ func int64Slice(in []int) []int64 {
 func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
     if cm.cpuManager != nil {
-        return int64Slice(cm.cpuManager.GetExclusiveCPUs(podUID, containerName).ToSliceNoSort())
+        return int64Slice(cm.cpuManager.GetExclusiveCPUs(podUID, containerName).UnsortedList())
     }
     return []int64{}
 }

 func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 {
     if cm.cpuManager != nil {
-        return int64Slice(cm.cpuManager.GetAllocatableCPUs().ToSliceNoSort())
+        return int64Slice(cm.cpuManager.GetAllocatableCPUs().UnsortedList())
     }
     return []int64{}
 }
@@ -128,7 +128,7 @@ func (n *numaFirst) takeFullSecondLevel() {
 // If NUMA nodes are higher in the memory hierarchy than sockets, then just
 // sort the NUMA nodes directly, and return them.
 func (n *numaFirst) sortAvailableNUMANodes() []int {
-    numas := n.acc.details.NUMANodes().ToSliceNoSort()
+    numas := n.acc.details.NUMANodes().UnsortedList()
     n.acc.sort(numas, n.acc.details.CPUsInNUMANodes)
     return numas
 }
@@ -139,7 +139,7 @@ func (n *numaFirst) sortAvailableNUMANodes() []int {
 func (n *numaFirst) sortAvailableSockets() []int {
     var result []int
     for _, numa := range n.sortAvailableNUMANodes() {
-        sockets := n.acc.details.SocketsInNUMANodes(numa).ToSliceNoSort()
+        sockets := n.acc.details.SocketsInNUMANodes(numa).UnsortedList()
         n.acc.sort(sockets, n.acc.details.CPUsInSockets)
         result = append(result, sockets...)
     }
@@ -151,7 +151,7 @@ func (n *numaFirst) sortAvailableSockets() []int {
 func (n *numaFirst) sortAvailableCores() []int {
     var result []int
     for _, socket := range n.acc.sortAvailableSockets() {
-        cores := n.acc.details.CoresInSockets(socket).ToSliceNoSort()
+        cores := n.acc.details.CoresInSockets(socket).UnsortedList()
         n.acc.sort(cores, n.acc.details.CPUsInCores)
         result = append(result, cores...)
     }
@@ -176,7 +176,7 @@ func (s *socketsFirst) takeFullSecondLevel() {
 func (s *socketsFirst) sortAvailableNUMANodes() []int {
     var result []int
     for _, socket := range s.sortAvailableSockets() {
-        numas := s.acc.details.NUMANodesInSockets(socket).ToSliceNoSort()
+        numas := s.acc.details.NUMANodesInSockets(socket).UnsortedList()
         s.acc.sort(numas, s.acc.details.CPUsInNUMANodes)
         result = append(result, numas...)
     }
@@ -186,7 +186,7 @@ func (s *socketsFirst) sortAvailableNUMANodes() []int {
 // If sockets are higher in the memory hierarchy than NUMA nodes, then just
 // sort the sockets directly, and return them.
 func (s *socketsFirst) sortAvailableSockets() []int {
-    sockets := s.acc.details.Sockets().ToSliceNoSort()
+    sockets := s.acc.details.Sockets().UnsortedList()
     s.acc.sort(sockets, s.acc.details.CPUsInSockets)
     return sockets
 }
@@ -196,7 +196,7 @@ func (s *socketsFirst) sortAvailableSockets() []int {
 func (s *socketsFirst) sortAvailableCores() []int {
     var result []int
     for _, numa := range s.acc.sortAvailableNUMANodes() {
-        cores := s.acc.details.CoresInNUMANodes(numa).ToSliceNoSort()
+        cores := s.acc.details.CoresInNUMANodes(numa).UnsortedList()
         s.acc.sort(cores, s.acc.details.CPUsInCores)
         result = append(result, cores...)
     }
@@ -323,7 +323,7 @@ func (a *cpuAccumulator) sortAvailableCores() []int {
 func (a *cpuAccumulator) sortAvailableCPUs() []int {
     var result []int
     for _, core := range a.sortAvailableCores() {
-        cpus := a.details.CPUsInCores(core).ToSliceNoSort()
+        cpus := a.details.CPUsInCores(core).UnsortedList()
         sort.Ints(cpus)
         result = append(result, cpus...)
     }
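The accumulator hunks above all share one shape: grab the IDs without sorting, then let the accumulator's own sort helper order them (the helper itself is not part of this diff). A rough, illustrative sketch of that "unsorted list plus key-based sort" idea, with hypothetical names (sortByFreeCPUs and freeCPUs stand in for the real helper and the details lookups):

```go
package main

import (
	"fmt"
	"sort"
)

// sortByFreeCPUs is an illustrative stand-in for the accumulator's sort
// helper: it orders IDs (NUMA nodes, sockets, cores) by how many free
// CPUs each one still holds. Starting from an unsorted slice is fine
// because the IDs are re-ordered immediately anyway.
func sortByFreeCPUs(ids []int, freeCPUs func(id int) int) []int {
	sort.Slice(ids, func(i, j int) bool {
		return freeCPUs(ids[i]) < freeCPUs(ids[j])
	})
	return ids
}

func main() {
	// Hypothetical free-CPU counts per socket ID.
	free := map[int]int{0: 3, 1: 8, 2: 1}
	fmt.Println(sortByFreeCPUs([]int{0, 1, 2}, func(id int) int { return free[id] }))
	// Output: [2 0 1]
}
```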
@@ -544,7 +544,7 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu

     // Iterate through all combinations of numa nodes bitmask and build hints from them.
     hints := []topologymanager.TopologyHint{}
-    bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().ToSlice(), func(mask bitmask.BitMask) {
+    bitmask.IterateBitMasks(p.topology.CPUDetails.NUMANodes().List(), func(mask bitmask.BitMask) {
         // First, update minAffinitySize for the current request size.
         cpusInMask := p.topology.CPUDetails.CPUsInNUMANodes(mask.GetBits()...).Size()
         if cpusInMask >= request && mask.Count() < minAffinitySize {
@@ -554,7 +554,7 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu
         // Then check to see if we have enough CPUs available on the current
         // numa node bitmask to satisfy the CPU request.
         numMatching := 0
-        for _, c := range reusableCPUs.ToSlice() {
+        for _, c := range reusableCPUs.List() {
             // Disregard this mask if its NUMANode isn't part of it.
             if !mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) {
                 return
@@ -564,7 +564,7 @@ func (p *staticPolicy) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, reu

         // Finally, check to see if enough available CPUs remain on the current
         // NUMA node combination to satisfy the CPU request.
-        for _, c := range availableCPUs.ToSlice() {
+        for _, c := range availableCPUs.List() {
             if mask.IsSet(p.topology.CPUDetails[c].NUMANodeID) {
                 numMatching++
             }
@@ -623,7 +623,7 @@ func (p *staticPolicy) getAlignedCPUs(numaAffinity bitmask.BitMask, allocatableC
     // socket aligned hint. It will ensure that first socket aligned available CPUs are
     // allocated before we try to find CPUs across socket to satisfy allocation request.
     if p.options.AlignBySocket {
-        socketBits := p.topology.CPUDetails.SocketsInNUMANodes(numaBits...).ToSliceNoSort()
+        socketBits := p.topology.CPUDetails.SocketsInNUMANodes(numaBits...).UnsortedList()
         for _, socketID := range socketBits {
             alignedCPUs = alignedCPUs.Union(allocatableCPUs.Intersection(p.topology.CPUDetails.CPUsInSockets(socketID)))
         }
@@ -162,20 +162,17 @@ func (s CPUSet) Difference(s2 CPUSet) CPUSet {
     return s.FilterNot(func(cpu int) bool { return s2.Contains(cpu) })
 }

-// ToSlice returns a slice of integers that contains all elements from
-// this set.
-func (s CPUSet) ToSlice() []int {
-    result := make([]int, 0, len(s.elems))
-    for cpu := range s.elems {
-        result = append(result, cpu)
-    }
+// List returns a slice of integers that contains all elements from
+// this set. The list is sorted.
+func (s CPUSet) List() []int {
+    result := s.UnsortedList()
     sort.Ints(result)
     return result
 }

-// ToSliceNoSort returns a slice of integers that contains all elements from
+// UnsortedList returns a slice of integers that contains all elements from
 // this set.
-func (s CPUSet) ToSliceNoSort() []int {
+func (s CPUSet) UnsortedList() []int {
     result := make([]int, 0, len(s.elems))
     for cpu := range s.elems {
         result = append(result, cpu)
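The new names follow the convention already used by the typed sets in k8s.io/apimachinery/pkg/util/sets, which is presumably the 'set' the commit message refers to. For comparison only (a sketch, not part of this change), that package exposes the same pair of accessors:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	s := sets.NewString("b", "c", "a")
	fmt.Println(s.List())         // sorted: [a b c]
	fmt.Println(s.UnsortedList()) // any order
}
```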
@@ -192,7 +189,7 @@ func (s CPUSet) String() string {
         return ""
     }

-    elems := s.ToSlice()
+    elems := s.List()

     type rng struct {
         start int
@@ -274,7 +274,7 @@ func TestCPUSetDifference(t *testing.T) {
     }
 }

-func TestCPUSetToSlice(t *testing.T) {
+func TestCPUSetList(t *testing.T) {
     testCases := []struct {
         set      CPUSet
         expected []int
@@ -285,7 +285,7 @@ func TestCPUSetToSlice(t *testing.T) {
     }

     for _, c := range testCases {
-        result := c.set.ToSlice()
+        result := c.set.List()
         if !reflect.DeepEqual(result, c.expected) {
             t.Fatalf("expected set as slice to be [%v] (got [%v]), s: [%v]", c.expected, result, c.set)
         }
@@ -318,10 +318,10 @@ func runMultipleGuNonGuPods(ctx context.Context, f *framework.Framework, cpuCap
     ginkgo.By("checking if the expected cpuset was assigned")
     cpu1 = 1
     if isHTEnabled() {
-        cpuList = mustParseCPUSet(getCPUSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCPUSiblingList(0)).List()
         cpu1 = cpuList[1]
     } else if isMultiNUMA() {
-        cpuList = mustParseCPUSet(getCoreSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCoreSiblingList(0)).List()
         if len(cpuList) > 1 {
             cpu1 = cpuList[1]
         }
@@ -367,7 +367,7 @@ func runMultipleCPUGuPod(ctx context.Context, f *framework.Framework) {
     ginkgo.By("checking if the expected cpuset was assigned")
     cpuListString = "1-2"
     if isMultiNUMA() {
-        cpuList = mustParseCPUSet(getCoreSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCoreSiblingList(0)).List()
         if len(cpuList) > 1 {
             cset = mustParseCPUSet(getCPUSiblingList(int64(cpuList[1])))
             if !isHTEnabled() && len(cpuList) > 2 {
@@ -377,7 +377,7 @@ func runMultipleCPUGuPod(ctx context.Context, f *framework.Framework) {
         }
     } else if isHTEnabled() {
         cpuListString = "2-3"
-        cpuList = mustParseCPUSet(getCPUSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCPUSiblingList(0)).List()
         if cpuList[1] != 1 {
             cset = mustParseCPUSet(getCPUSiblingList(1))
             cpuListString = fmt.Sprintf("%s", cset)
@@ -418,18 +418,18 @@ func runMultipleCPUContainersGuPod(ctx context.Context, f *framework.Framework)
     ginkgo.By("checking if the expected cpuset was assigned")
     cpu1, cpu2 = 1, 2
     if isHTEnabled() {
-        cpuList = mustParseCPUSet(getCPUSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCPUSiblingList(0)).List()
         if cpuList[1] != 1 {
             cpu1, cpu2 = cpuList[1], 1
         }
         if isMultiNUMA() {
-            cpuList = mustParseCPUSet(getCoreSiblingList(0)).ToSlice()
+            cpuList = mustParseCPUSet(getCoreSiblingList(0)).List()
             if len(cpuList) > 1 {
                 cpu2 = cpuList[1]
             }
         }
     } else if isMultiNUMA() {
-        cpuList = mustParseCPUSet(getCoreSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCoreSiblingList(0)).List()
         if len(cpuList) > 2 {
             cpu1, cpu2 = cpuList[1], cpuList[2]
         }
@@ -480,18 +480,18 @@ func runMultipleGuPods(ctx context.Context, f *framework.Framework) {
     ginkgo.By("checking if the expected cpuset was assigned")
     cpu1, cpu2 = 1, 2
     if isHTEnabled() {
-        cpuList = mustParseCPUSet(getCPUSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCPUSiblingList(0)).List()
         if cpuList[1] != 1 {
             cpu1, cpu2 = cpuList[1], 1
         }
         if isMultiNUMA() {
-            cpuList = mustParseCPUSet(getCoreSiblingList(0)).ToSlice()
+            cpuList = mustParseCPUSet(getCoreSiblingList(0)).List()
             if len(cpuList) > 1 {
                 cpu2 = cpuList[1]
             }
         }
     } else if isMultiNUMA() {
-        cpuList = mustParseCPUSet(getCoreSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCoreSiblingList(0)).List()
         if len(cpuList) > 2 {
             cpu1, cpu2 = cpuList[1], cpuList[2]
         }
@@ -588,10 +588,10 @@ func runCPUManagerTests(f *framework.Framework) {
     ginkgo.By("checking if the expected cpuset was assigned")
     cpu1 = 1
     if isHTEnabled() {
-        cpuList = mustParseCPUSet(getCPUSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCPUSiblingList(0)).List()
         cpu1 = cpuList[1]
     } else if isMultiNUMA() {
-        cpuList = mustParseCPUSet(getCoreSiblingList(0)).ToSlice()
+        cpuList = mustParseCPUSet(getCoreSiblingList(0)).List()
         if len(cpuList) > 1 {
             cpu1 = cpuList[1]
         }
@@ -734,10 +734,10 @@ func validateSMTAlignment(cpus cpuset.CPUSet, smtLevel int, pod *v1.Pod, cnt *v1
     // to do so the easiest way is to rebuild the expected set of siblings from all the cpus we got.
     // if the expected set matches the given set, the given set was good.
     b := cpuset.NewBuilder()
-    for _, cpuID := range cpus.ToSliceNoSort() {
+    for _, cpuID := range cpus.UnsortedList() {
         threadSiblings, err := cpuset.Parse(strings.TrimSpace(getCPUSiblingList(int64(cpuID))))
         framework.ExpectNoError(err, "parsing cpuset from logs for [%s] of pod [%s]", cnt.Name, pod.Name)
-        b.Add(threadSiblings.ToSliceNoSort()...)
+        b.Add(threadSiblings.UnsortedList()...)
     }
     siblingsCPUs := b.Result()

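The hunk above also shows why UnsortedList is enough at these call sites: the IDs are fed straight back into a cpuset Builder, so element order never matters. A small sketch of that round trip, assuming the same in-tree cpuset package (the IDs are made up):

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
)

func main() {
	// Round-tripping through UnsortedList is order-insensitive, so the
	// unsorted accessor is sufficient when the IDs go straight back into a set.
	orig := cpuset.NewCPUSet(3, 1, 2)

	b := cpuset.NewBuilder()
	b.Add(orig.UnsortedList()...)
	rebuilt := b.Result()

	fmt.Println(orig.Equals(rebuilt)) // true
}
```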
@@ -280,7 +280,7 @@ var _ = SIGDescribe("Memory Manager [Disruptive] [Serial] [Feature:MemoryManager
             currentNUMANodeIDs, err := cpuset.Parse(strings.Trim(output, "\n"))
             framework.ExpectNoError(err)

-            framework.ExpectEqual(numaNodeIDs, currentNUMANodeIDs.ToSlice())
+            framework.ExpectEqual(numaNodeIDs, currentNUMANodeIDs.List())
         }

         waitingForHugepages := func(ctx context.Context, hugepagesCount int) {
@@ -86,7 +86,7 @@ func getCPUsPerNUMANode(nodeNum int) ([]int, error) {
     if err != nil {
         return nil, err
     }
-    return cpus.ToSlice(), nil
+    return cpus.List(), nil
 }

 func getCPUToNUMANodeMapFromEnv(f *framework.Framework, pod *v1.Pod, cnt *v1.Container, environ map[string]string, numaNodes int) (map[int]int, error) {
@@ -99,7 +99,7 @@ func getCPUToNUMANodeMapFromEnv(f *framework.Framework, pod *v1.Pod, cnt *v1.Con
             if err != nil {
                 return nil, err
             }
-            cpuIDs = cpus.ToSlice()
+            cpuIDs = cpus.List()
         }
     }
     if len(cpuIDs) == 0 {
@@ -115,7 +115,7 @@ func getCPUToNUMANodeMapFromEnv(f *framework.Framework, pod *v1.Pod, cnt *v1.Con
         if err != nil {
             return nil, err
         }
-        cpusPerNUMA[numaNode] = cpus.ToSlice()
+        cpusPerNUMA[numaNode] = cpus.List()
     }

     // CPU IDs -> NUMA Node ID