mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-04 01:40:07 +00:00)

commit 4d25c25eb0 (parent ed67bd77a8)

    Support CPU and Memory manager on Windows

    Signed-off-by: James Sturtevant <jsturtevant@gmail.com>
@@ -196,6 +196,14 @@ type Status struct {
 	SoftRequirements error
 }
 
+func int64Slice(in []int) []int64 {
+	out := make([]int64, len(in))
+	for i := range in {
+		out[i] = int64(in[i])
+	}
+	return out
+}
+
 // parsePercentage parses the percentage string to numeric value.
 func parsePercentage(v string) (int64, error) {
 	if !strings.HasSuffix(v, "%") {
@@ -912,14 +912,6 @@ func (cm *containerManagerImpl) GetAllocatableDevices() []*podresourcesapi.ContainerDevices {
 	return containerDevicesFromResourceDeviceInstances(cm.deviceManager.GetAllocatableDevices())
 }
 
-func int64Slice(in []int) []int64 {
-	out := make([]int64, len(in))
-	for i := range in {
-		out[i] = int64(in[i])
-	}
-	return out
-}
-
 func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
 	if cm.cpuManager != nil {
 		return int64Slice(cm.cpuManager.GetExclusiveCPUs(podUID, containerName).UnsortedList())
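For context, a small standalone sketch (assuming k8s.io/utils/cpuset; not part of the commit) of why the helper moves out of the Linux-only file: the CPU manager's cpuset yields []int, while the pod resources API reports CPU ids as []int64, and now both the Linux and Windows container managers need the conversion.

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

// int64Slice is a copy of the helper shown in the hunk above, repeated
// here so the sketch is self-contained.
func int64Slice(in []int) []int64 {
	out := make([]int64, len(in))
	for i := range in {
		out[i] = int64(in[i])
	}
	return out
}

func main() {
	ids := cpuset.New(0, 2, 3).UnsortedList() // []int, order unspecified
	fmt.Println(int64Slice(ids))              // e.g. [0 2 3] as []int64
}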
@@ -25,6 +25,9 @@ package cm
 import (
 	"context"
 	"fmt"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	kubefeatures "k8s.io/kubernetes/pkg/features"
+	"sync"
 
 	"k8s.io/klog/v2"
 	"k8s.io/mount-utils"
@@ -37,7 +40,6 @@ import (
 	internalapi "k8s.io/cri-api/pkg/apis"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
 	"k8s.io/kubernetes/pkg/kubelet/cadvisor"
-	"k8s.io/kubernetes/pkg/kubelet/cm/admission"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
@@ -62,12 +64,10 @@ type containerManagerImpl struct {
 	deviceManager devicemanager.Manager
 	// Interface for Topology resource co-ordination
 	topologyManager topologymanager.Manager
-}
-
-type noopWindowsResourceAllocator struct{}
-
-func (ra *noopWindowsResourceAllocator) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
-	return admission.GetPodAdmitResult(nil)
+	cpuManager      cpumanager.Manager
+	memoryManager   memorymanager.Manager
+	nodeInfo        *v1.Node
+	sync.RWMutex
 }
 
 func (cm *containerManagerImpl) Start(ctx context.Context, node *v1.Node,
@@ -78,6 +78,8 @@ func (cm *containerManagerImpl) Start(ctx context.Context, node *v1.Node,
 	localStorageCapacityIsolation bool) error {
 	klog.V(2).InfoS("Starting Windows container manager")
 
+	cm.nodeInfo = node
+
 	if localStorageCapacityIsolation {
 		rootfs, err := cm.cadvisorInterface.RootFsInfo()
 		if err != nil {
@@ -90,6 +92,21 @@ func (cm *containerManagerImpl) Start(ctx context.Context, node *v1.Node,
 
 	containerMap, containerRunningSet := buildContainerMapAndRunningSetFromRuntime(ctx, runtimeService)
 
+	// Initialize CPU manager
+	err := cm.cpuManager.Start(cpumanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap)
+	if err != nil {
+		return fmt.Errorf("start cpu manager error: %v", err)
+	}
+
+	// Initialize memory manager
+	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryManager) {
+		containerMap, _ := buildContainerMapAndRunningSetFromRuntime(ctx, runtimeService)
+		err := cm.memoryManager.Start(memorymanager.ActivePodsFunc(activePods), sourcesReady, podStatusProvider, runtimeService, containerMap)
+		if err != nil {
+			return fmt.Errorf("start memory manager error: %v", err)
+		}
+	}
+
 	// Starts device manager.
 	if err := cm.deviceManager.Start(devicemanager.ActivePodsFunc(activePods), sourcesReady, containerMap, containerRunningSet); err != nil {
 		return err
@@ -115,7 +132,15 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface,
 		cadvisorInterface: cadvisorInterface,
 	}
 
-	cm.topologyManager = topologymanager.NewFakeManager()
+	klog.InfoS("Creating topology manager")
+	cm.topologyManager, err = topologymanager.NewManager(machineInfo.Topology,
+		nodeConfig.TopologyManagerPolicy,
+		nodeConfig.TopologyManagerScope,
+		nodeConfig.TopologyManagerPolicyOptions)
+
+	if err != nil {
+		return nil, err
+	}
 
 	klog.InfoS("Creating device plugin manager")
 	cm.deviceManager, err = devicemanager.NewManagerImpl(nil, cm.topologyManager)
@@ -124,6 +149,40 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.Interface,
 	}
 	cm.topologyManager.AddHintProvider(cm.deviceManager)
 
+	klog.InfoS("Creating cpu manager")
+	cm.cpuManager, err = cpumanager.NewManager(
+		nodeConfig.CPUManagerPolicy,
+		nodeConfig.CPUManagerPolicyOptions,
+		nodeConfig.CPUManagerReconcilePeriod,
+		machineInfo,
+		nodeConfig.NodeAllocatableConfig.ReservedSystemCPUs,
+		cm.GetNodeAllocatableReservation(),
+		nodeConfig.KubeletRootDir,
+		cm.topologyManager,
+	)
+	if err != nil {
+		klog.ErrorS(err, "Failed to initialize cpu manager")
+		return nil, err
+	}
+	cm.topologyManager.AddHintProvider(cm.cpuManager)
+
+	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.MemoryManager) {
+		klog.InfoS("Creating memory manager")
+		cm.memoryManager, err = memorymanager.NewManager(
+			nodeConfig.ExperimentalMemoryManagerPolicy,
+			machineInfo,
+			cm.GetNodeAllocatableReservation(),
+			nodeConfig.ExperimentalMemoryManagerReservedMemory,
+			nodeConfig.KubeletRootDir,
+			cm.topologyManager,
+		)
+		if err != nil {
+			klog.ErrorS(err, "Failed to initialize memory manager")
+			return nil, err
+		}
+		cm.topologyManager.AddHintProvider(cm.memoryManager)
+	}
+
 	return cm, nil
 }
@@ -132,7 +191,9 @@ func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {
 }
 
 func (cm *containerManagerImpl) GetNodeConfig() NodeConfig {
-	return NodeConfig{}
+	cm.RLock()
+	defer cm.RUnlock()
+	return cm.nodeConfig
 }
 
 func (cm *containerManagerImpl) GetMountedSubsystems() *CgroupSubsystems {
@@ -223,7 +284,7 @@ func (cm *containerManagerImpl) UpdatePluginResources(node *schedulerframework.NodeInfo,
 }
 
 func (cm *containerManagerImpl) InternalContainerLifecycle() InternalContainerLifecycle {
-	return &internalContainerLifecycleImpl{cpumanager.NewFakeManager(), memorymanager.NewFakeManager(), topologymanager.NewFakeManager()}
+	return &internalContainerLifecycleImpl{cm.cpuManager, cm.memoryManager, cm.topologyManager}
 }
 
 func (cm *containerManagerImpl) GetPodCgroupRoot() string {
@@ -243,19 +304,25 @@ func (cm *containerManagerImpl) ShouldResetExtendedResourceCapacity() bool {
 }
 
 func (cm *containerManagerImpl) GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler {
-	return &noopWindowsResourceAllocator{}
+	return cm.topologyManager
 }
 
 func (cm *containerManagerImpl) UpdateAllocatedDevices() {
 	return
 }
 
-func (cm *containerManagerImpl) GetCPUs(_, _ string) []int64 {
-	return nil
+func (cm *containerManagerImpl) GetCPUs(podUID, containerName string) []int64 {
+	if cm.cpuManager != nil {
+		return int64Slice(cm.cpuManager.GetExclusiveCPUs(podUID, containerName).UnsortedList())
+	}
+	return []int64{}
 }
 
 func (cm *containerManagerImpl) GetAllocatableCPUs() []int64 {
-	return nil
+	if cm.cpuManager != nil {
+		return int64Slice(cm.cpuManager.GetAllocatableCPUs().UnsortedList())
+	}
+	return []int64{}
 }
 
 func (cm *containerManagerImpl) GetMemory(_, _ string) []*podresourcesapi.ContainerMemory {
|
@ -19,7 +19,9 @@ package cpumanager
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"k8s.io/kubernetes/pkg/kubelet/winstats"
|
||||||
"math"
|
"math"
|
||||||
|
"runtime"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
@@ -265,6 +267,7 @@ func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error {
 	defer m.Unlock()
 
 	// Call down into the policy to assign this container CPUs if required.
+	klog.InfoS("jjs allocate call")
 	err := m.policy.Allocate(m.state, p, c)
 	if err != nil {
 		klog.ErrorS(err, "Allocate error")
@@ -533,6 +536,24 @@ func (m *manager) updateContainerCPUSet(ctx context.Context, containerID string, cpus cpuset.CPUSet) error {
 	// helpers_linux.go similar to what exists for pods.
 	// It would be better to pass the full container resources here instead of
 	// this patch-like partial resources.
+
+	if runtime.GOOS == "windows" {
+		klog.Info("Updating windows CPU affinity")
+
+		affinities := winstats.CpusToGroupAffinity(cpus.List())
+		var cpuGroupAffinities []*runtimeapi.WindowsCpuGroupAffinity
+		for _, affinity := range affinities {
+			cpuGroupAffinities = append(cpuGroupAffinities, &runtimeapi.WindowsCpuGroupAffinity{
+				CpuGroup: uint32(affinity.Group),
+				CpuMask:  uint64(affinity.Mask),
+			})
+		}
+		return m.containerRuntime.UpdateContainerResources(ctx, containerID, &runtimeapi.ContainerResources{
+			Windows: &runtimeapi.WindowsContainerResources{
+				AffinityCpus: cpuGroupAffinities,
+			},
+		})
+	}
 	return m.containerRuntime.UpdateContainerResources(
 		ctx,
 		containerID,
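For illustration, a minimal standalone sketch (not part of the commit) of the processor-group arithmetic that winstats.CpusToGroupAffinity applies here: Windows addresses logical processors in groups of up to 64, so a CPU id maps to group cpu/64 with bit cpu%64 set in that group's mask.

package main

import "fmt"

func main() {
	// CPUs 0-2 fall in group 0; CPU 65 falls in group 1, bit 1.
	cpus := []int{0, 1, 2, 65}
	masks := map[int]uint64{}
	for _, cpu := range cpus {
		masks[cpu/64] |= 1 << (cpu % 64)
	}
	for group, mask := range masks {
		fmt.Printf("group %d mask %#x\n", group, mask)
	}
	// Prints (map order may vary):
	// group 0 mask 0x7
	// group 1 mask 0x2
}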
@@ -300,8 +300,10 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, cset cpuset.CPUSet) {
 }
 
 func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) (rerr error) {
+	klog.InfoS("jjs Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
 	numCPUs := p.guaranteedCPUs(pod, container)
 	if numCPUs == 0 {
+		klog.InfoS("shared pool", "pod", klog.KObj(pod), "containerName", container.Name)
 		// container belongs in the shared pool (nothing to do; use default cpuset)
 		return nil
 	}
@@ -402,7 +404,7 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
 }
 
 func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (cpuset.CPUSet, error) {
-	klog.InfoS("AllocateCPUs", "numCPUs", numCPUs, "socket", numaAffinity)
+	klog.InfoS("CPUSet", "numCPUs", numCPUs, "socket", numaAffinity)
 
 	allocatableCPUs := p.GetAvailableCPUs(s).Union(reusableCPUs)
 
@@ -440,6 +442,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (cpuset.CPUSet, error) {
 
 func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int {
 	if v1qos.GetPodQOS(pod) != v1.PodQOSGuaranteed {
+		klog.InfoS("Not guaranteed", "pod", pod.UID, "containerName", container.Name)
 		return 0
 	}
 	cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
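As a worked example (a sketch under stated assumptions, not from the commit): guaranteedCPUs only returns a non-zero count for a Guaranteed pod whose CPU request is a whole number of CPUs; fractional requests stay in the shared pool. The check below mirrors that rule using apimachinery's resource.Quantity.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// integerCPUs returns the whole-CPU count for a request, or 0 if the
// request is fractional (fractional requests get no exclusive CPUs).
func integerCPUs(q resource.Quantity) int {
	if q.MilliValue()%1000 != 0 {
		return 0
	}
	return int(q.Value())
}

func main() {
	fmt.Println(integerCPUs(resource.MustParse("2")))    // 2 -> two exclusive CPUs
	fmt.Println(integerCPUs(resource.MustParse("500m"))) // 0 -> shared pool
}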
@@ -21,9 +21,42 @@ package cm
 
 import (
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
 	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+	"k8s.io/klog/v2"
+	"k8s.io/kubernetes/pkg/kubelet/winstats"
 )
 
 func (i *internalContainerLifecycleImpl) PreCreateContainer(pod *v1.Pod, container *v1.Container, containerConfig *runtimeapi.ContainerConfig) error {
+	klog.Info("PreCreateContainer for Windows")
+	if i.cpuManager != nil {
+		allocatedCPUs := i.cpuManager.GetCPUAffinity(string(pod.UID), container.Name)
+		if !allocatedCPUs.IsEmpty() {
+			klog.Infof("Setting CPU affinity for container %q cpus %v", container.Name, allocatedCPUs.String())
+			var cpuGroupAffinities []*runtimeapi.WindowsCpuGroupAffinity
+			affinities := winstats.CpusToGroupAffinity(allocatedCPUs.List())
+			for _, affinity := range affinities {
+				klog.Infof("Setting CPU affinity for container %q in group %v with mask %v (processors %v)", container.Name, affinity.Group, affinity.Mask, affinity.Processors())
+				cpuGroupAffinities = append(cpuGroupAffinities, &runtimeapi.WindowsCpuGroupAffinity{
+					CpuGroup: uint32(affinity.Group),
+					CpuMask:  uint64(affinity.Mask),
+				})
+			}
+
+			containerConfig.Windows.Resources.AffinityCpus = cpuGroupAffinities
+		}
+	}
+
+	if i.memoryManager != nil {
+		numaNodes := i.memoryManager.GetMemoryNUMANodes(pod, container)
+		if numaNodes.Len() > 0 {
+			var affinity []int64
+			for _, numaNode := range sets.List(numaNodes) {
+				affinity = append(affinity, int64(numaNode))
+			}
+			klog.Info("Setting memory NUMA nodes for container")
+			containerConfig.Windows.Resources.AffinityPrefferedNumaNodes = affinity
+		}
+	}
 	return nil
 }
@@ -1421,7 +1421,7 @@ func (kl *Kubelet) setupDataDirs() error {
 	if err := kl.hostutil.MakeRShared(kl.getRootDir()); err != nil {
 		return fmt.Errorf("error configuring root directory: %v", err)
 	}
-	if err := os.MkdirAll(kl.getPodsDir(), 0750); err != nil {
+	if err := utilfs.MkdirAll(kl.getPodsDir(), 0750); err != nil {
 		return fmt.Errorf("error creating pods directory: %v", err)
 	}
 	if err := utilfs.MkdirAll(kl.getPluginsDir(), 0750); err != nil {
pkg/kubelet/winstats/cpu_topology.go (new file, 250 lines)
@@ -0,0 +1,250 @@
package winstats

import (
	"fmt"
	cadvisorapi "github.com/google/cadvisor/info/v1"
	"k8s.io/klog/v2"
	"syscall"
	"unsafe"
)

var (
	procGetLogicalProcessorInformationEx = modkernel32.NewProc("GetLogicalProcessorInformationEx")
	getNumaAvailableMemoryNodeEx         = modkernel32.NewProc("GetNumaAvailableMemoryNodeEx")
)

type RelationType int

const (
	RelationProcessorCore RelationType = iota
	RelationNumaNode
	RelationCache
	RelationProcessorPackage
	RelationGroup
	RelationProcessorDie
	RelationNumaNodeEx
	RelationProcessorModule
	RelationAll = 0xffff
)

type SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX struct {
	Relationship uint32
	Size         uint32
	data         interface{}
}

type PROCESSOR_RELATIONSHIP struct {
	Flags           byte
	EfficiencyClass byte
	Reserved        [20]byte
	GroupCount      uint16
	GroupMasks      interface{} //[]GROUP_AFFINITY // in c++ this is a union of either one or many GROUP_AFFINITY based on GroupCount
}

type GROUP_AFFINITY struct {
	Mask     uintptr
	Group    uint16
	Reserved [3]uint16
}

func (a GROUP_AFFINITY) Processors() []int {
	processors := []int{}
	for i := 0; i < 64; i++ {
		if a.Mask&(1<<i) != 0 {
			processors = append(processors, i+(int(a.Group)*64))
		}
	}
	return processors
}

func CpusToGroupAffinity(cpus []int) map[int]*GROUP_AFFINITY {
	groupAffinities := make(map[int]*GROUP_AFFINITY)
	for _, cpu := range cpus {
		group := uint16(cpu / 64)

		groupaffinity, ok := groupAffinities[int(group)]
		if !ok {
			groupaffinity = &GROUP_AFFINITY{
				Group: group,
			}
			groupAffinities[int(group)] = groupaffinity
		}
		mask := uintptr(1 << (cpu % 64))
		groupaffinity.Mask |= mask
	}
	return groupAffinities
}

type NUMA_NODE_RELATIONSHIP struct {
	NodeNumber uint32
	Reserved   [18]byte
	GroupCount uint16
	GroupMasks interface{} //[]GROUP_AFFINITY // in c++ this is a union of either one or many GROUP_AFFINITY based on GroupCount
}

type CACHE_RELATIONSHIP struct {
	Level         byte
	Associativity byte
	LineSize      uint16
	CacheSize     uint32
	Type          PROCESSOR_CACHE_TYPE
	Reserved      [18]byte
	GroupCount    uint16
	GroupMasks    interface{} //[]GROUP_AFFINITY // in c++ this is a union of either one or many GROUP_AFFINITY based on GroupCount
}

type PROCESSOR_CACHE_TYPE int

const (
	CacheUnified PROCESSOR_CACHE_TYPE = iota
	CacheInstruction
	CacheData
	CacheTrace
	CacheUnknown
)

type GROUP_RELATIONSHIP struct {
	MaximumGroupCount uint16
	ActiveGroupCount  uint16
	Reserved          [20]byte
	GroupInfo         interface{} //[]PROCESSOR_GROUP_INFO
}

type PROCESSOR_GROUP_INFO struct {
	MaximumProcessorCount byte
	ActiveProcessorCount  byte
	Reserved              [38]byte
	ActiveProcessorMask   uintptr
}

type processor struct {
	CoreID   int
	SocketID int
	NodeID   int
}

func processorInfo(relationShip RelationType) (int, int, []cadvisorapi.Node, error) {
	// Call once to get the length of data to return
	var returnLength uint32 = 0
	r1, _, err := procGetLogicalProcessorInformationEx.Call(
		uintptr(relationShip),
		uintptr(0),
		uintptr(unsafe.Pointer(&returnLength)),
	)
	if r1 != 0 && err.(syscall.Errno) != syscall.ERROR_INSUFFICIENT_BUFFER {
		return 0, 0, nil, fmt.Errorf("Call to GetLogicalProcessorInformationEx failed: %v", err)
	}

	// Allocate the buffer with the length it should be
	buffer := make([]byte, returnLength)

	// Call GetLogicalProcessorInformationEx again to get the actual information
	r1, _, err = procGetLogicalProcessorInformationEx.Call(
		uintptr(relationShip),
		uintptr(unsafe.Pointer(&buffer[0])),
		uintptr(unsafe.Pointer(&returnLength)),
	)
	if r1 == 0 {
		return 0, 0, nil, fmt.Errorf("Call to GetLogicalProcessorInformationEx failed: %v", err)
	}

	return convertWinApiToCadvisorApi(buffer)
}

func convertWinApiToCadvisorApi(buffer []byte) (int, int, []cadvisorapi.Node, error) {
	logicalProcessors := make(map[int]*processor)
	numofSockets := 0
	numOfcores := 0
	nodes := []cadvisorapi.Node{}
	// iterate over the buffer casting it to the correct type
	for offset := 0; offset < len(buffer); {
		// todo: check if there is enough left in buffer to read SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX?
		info := (*SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX)(unsafe.Pointer(&buffer[offset]))
		switch (RelationType)(info.Relationship) {
		case RelationProcessorCore, RelationProcessorPackage:
			processorRelationship := (*PROCESSOR_RELATIONSHIP)(unsafe.Pointer(&info.data))
			groupMasks := make([]GROUP_AFFINITY, processorRelationship.GroupCount)
			for i := 0; i < int(processorRelationship.GroupCount); i++ {
				groupMasks[i] = *(*GROUP_AFFINITY)(unsafe.Pointer(uintptr(unsafe.Pointer(&processorRelationship.GroupMasks)) + uintptr(i)*unsafe.Sizeof(GROUP_AFFINITY{})))
			}

			if RelationProcessorCore == (RelationType)(info.Relationship) {
				numOfcores++
			}

			if RelationProcessorPackage == (RelationType)(info.Relationship) {
				numofSockets++
			}

			// iterate over group masks and add each processor to the map
			for _, groupMask := range groupMasks {
				for _, processorId := range groupMask.Processors() {
					p, ok := logicalProcessors[processorId]
					if !ok {
						p = &processor{}
						logicalProcessors[processorId] = p
					}
					if RelationProcessorCore == (RelationType)(info.Relationship) {
						p.CoreID = numOfcores
					}
					if RelationProcessorPackage == (RelationType)(info.Relationship) {
						p.SocketID = numofSockets
					}
				}
			}

		case RelationNumaNode, RelationNumaNodeEx:
			numaNodeRelationship := (*NUMA_NODE_RELATIONSHIP)(unsafe.Pointer(&info.data))
			groupMasks := make([]GROUP_AFFINITY, numaNodeRelationship.GroupCount)
			for i := 0; i < int(numaNodeRelationship.GroupCount); i++ {
				groupMasks[i] = *(*GROUP_AFFINITY)(unsafe.Pointer(uintptr(unsafe.Pointer(&numaNodeRelationship.GroupMasks)) + uintptr(i)*unsafe.Sizeof(GROUP_AFFINITY{})))
			}

			nodes = append(nodes, cadvisorapi.Node{Id: int(numaNodeRelationship.NodeNumber)})

			for _, groupMask := range groupMasks {
				for processorId := range groupMask.Processors() {
					p, ok := logicalProcessors[processorId]
					if !ok {
						p = &processor{}
						logicalProcessors[processorId] = p
					}
					p.NodeID = int(numaNodeRelationship.NodeNumber)
				}
			}

		case RelationCache:
			//cacheRelationship := (*CACHE_RELATIONSHIP)(unsafe.Pointer(&info.data))
			// TODO Process cache relationship data

		default:
			klog.V(4).Infof("Not using relationship type: %d", info.Relationship)
		}

		// Move the offset to the next SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX struct
		offset += int(info.Size)
	}

	for processId, p := range logicalProcessors {
		klog.V(4).Infof("Processor (%d): %v", processId, p)
		node := nodes[p.NodeID]
		if node.Id != p.NodeID {
			return 0, 0, nil, fmt.Errorf("Node ID mismatch: %d != %d", node.Id, p.NodeID)
		}
		availableBytes := uint64(0)
		r1, _, err := getNumaAvailableMemoryNodeEx.Call(uintptr(p.NodeID), uintptr(unsafe.Pointer(&availableBytes)))
		if r1 == 0 {
			return 0, 0, nil, fmt.Errorf("Call to GetNumaAvailableMemoryNodeEx failed: %v", err)
		}
		node.Memory = availableBytes
		node.AddThread(processId, p.CoreID)
		ok, coreIdx := node.FindCore(p.CoreID)
		if !ok {
			return 0, 0, nil, fmt.Errorf("Core not found: %d", p.CoreID)
		}
		node.Cores[coreIdx].SocketID = p.SocketID
		nodes[p.NodeID] = node
	}

	return numOfcores, numofSockets, nodes, nil
}
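A usage sketch for the two helpers above (a hypothetical Example test, not part of the commit; it assumes it lives in package winstats alongside the helpers): CpusToGroupAffinity packs CPU ids into one GROUP_AFFINITY per processor group, and Processors() expands each mask back to absolute CPU ids.

func ExampleCpusToGroupAffinity() {
	// CPUs 0 and 1 land in group 0; CPU 65 lands in group 1, bit 1.
	for group, affinity := range CpusToGroupAffinity([]int{0, 1, 65}) {
		fmt.Println(group, affinity.Processors())
	}
	// Unordered output:
	// 0 [0 1]
	// 1 [65]
}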
pkg/kubelet/winstats/cpu_topology_test.go (new file, 376 lines)
@@ -0,0 +1,376 @@
package winstats

import (
	"fmt"
	cadvisorapi "github.com/google/cadvisor/info/v1"
	"github.com/stretchr/testify/assert"
	"testing"
	"unsafe"
)

func TestGROUP_AFFINITY_Processors(t *testing.T) {
	tests := []struct {
		name  string
		Mask  uintptr
		Group uint16
		want  []int
	}{
		{name: "empty", Mask: 0, Group: 0, want: []int{}},
		{name: "empty group 2", Mask: 0, Group: 1, want: []int{}},
		{name: "cpu 1 Group 0", Mask: 1, Group: 0, want: []int{0}},
		{name: "cpu 64 Group 0", Mask: 1 << 63, Group: 0, want: []int{63}},
		{name: "cpu 128 Group 1", Mask: 1 << 63, Group: 1, want: []int{127}},
		{name: "cpu 128 (Group 1)", Mask: 1 << 63, Group: 1, want: []int{127}},
		{name: "Mask 1 Group 2", Mask: 1, Group: 2, want: []int{128}},
		{name: "64 cpus group 0", Mask: 0xffffffffffffffff, Group: 0, want: makeRange(0, 63)},
		{name: "64 cpus group 1", Mask: 0xffffffffffffffff, Group: 1, want: makeRange(64, 127)},
		{name: "64 cpus group 1", Mask: 0xffffffffffffffff, Group: 1, want: makeRange(64, 127)},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			a := GROUP_AFFINITY{
				Mask:  tt.Mask,
				Group: tt.Group,
			}
			assert.Equalf(t, tt.want, a.Processors(), "Processors()")
		})
	}
}

// https://stackoverflow.com/a/39868255/697126
func makeRange(min, max int) []int {
	a := make([]int, max-min+1)
	for i := range a {
		a[i] = min + i
	}
	return a
}

func TestCpusToGroupAffinity(t *testing.T) {
	tests := []struct {
		name string
		cpus []int
		want map[int]*GROUP_AFFINITY
	}{
		{name: "empty", want: map[int]*GROUP_AFFINITY{}},
		{name: "single cpu group 0", cpus: []int{0}, want: map[int]*GROUP_AFFINITY{0: {Mask: 1, Group: 0}}},
		{name: "single cpu group 0", cpus: []int{63}, want: map[int]*GROUP_AFFINITY{0: {Mask: 1 << 63, Group: 0}}},
		{name: "single cpu group 1", cpus: []int{64}, want: map[int]*GROUP_AFFINITY{1: {Mask: 1, Group: 1}}},
		{
			name: "multiple cpus same group",
			cpus: []int{0, 1, 2},
			want: map[int]*GROUP_AFFINITY{
				0: {Mask: 1 | 2 | 4, Group: 0}, // Binary OR to combine the masks
			},
		},
		{
			name: "multiple cpus different groups",
			cpus: []int{0, 64},
			want: map[int]*GROUP_AFFINITY{
				0: {Mask: 1, Group: 0},
				1: {Mask: 1, Group: 1},
			},
		},
		{
			name: "multiple cpus different groups",
			cpus: []int{0, 1, 2, 64, 65, 66},
			want: map[int]*GROUP_AFFINITY{
				0: {Mask: 1 | 2 | 4, Group: 0},
				1: {Mask: 1 | 2 | 4, Group: 1},
			},
		},
		{
			name: "64 cpus group 0",
			cpus: makeRange(0, 63),
			want: map[int]*GROUP_AFFINITY{
				0: {Mask: 0xffffffffffffffff, Group: 0}, // All 64 bits set
			},
		},
		{
			name: "64 cpus group 1",
			cpus: makeRange(64, 127),
			want: map[int]*GROUP_AFFINITY{
				1: {Mask: 0xffffffffffffffff, Group: 1}, // All 64 bits set
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equalf(t, tt.want, CpusToGroupAffinity(tt.cpus), "CpusToGroupAffinity(%v)", tt.cpus)
		})
	}
}

func Test_convertWinApiToCadvisorApi(t *testing.T) {
	tests := []struct {
		name                 string
		buffer               []byte
		expectedNumOfCores   int
		expectedNumOfSockets int
		expectedNodes        []cadvisorapi.Node
		wantErr              assert.ErrorAssertionFunc
	}{
		{
			name:                 "empty",
			buffer:               []byte{},
			expectedNumOfCores:   0,
			expectedNumOfSockets: 0,
			expectedNodes:        []cadvisorapi.Node{},
			wantErr:              assert.NoError,
		},
		{
			name:                 "single core",
			buffer:               createProcessorRelationships([]int{0}),
			expectedNumOfCores:   1,
			expectedNumOfSockets: 1,
			expectedNodes: []cadvisorapi.Node{
				{
					Id: 0,
					Cores: []cadvisorapi.Core{
						{Id: 1, Threads: []int{0}},
					},
				},
			},
			wantErr: assert.NoError,
		},
		{
			name:                 "single core, multiple cpus",
			buffer:               createProcessorRelationships([]int{0, 1, 2}),
			expectedNumOfCores:   1,
			expectedNumOfSockets: 1,
			expectedNodes: []cadvisorapi.Node{
				{
					Id: 0,
					Cores: []cadvisorapi.Core{
						{Id: 1, Threads: []int{0, 1, 2}},
					},
				},
			},
			wantErr: assert.NoError,
		},
		{
			name:                 "single core, multiple groups",
			buffer:               createProcessorRelationships([]int{0, 64}),
			expectedNumOfCores:   1,
			expectedNumOfSockets: 1,
			expectedNodes: []cadvisorapi.Node{
				{
					Id: 0,
					Cores: []cadvisorapi.Core{
						{Id: 1, Threads: []int{0, 64}},
					},
				},
			},
			wantErr: assert.NoError,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			numOfCores, numOfSockets, nodes, err := convertWinApiToCadvisorApi(tt.buffer)
			if !tt.wantErr(t, err, fmt.Sprintf("convertWinApiToCadvisorApi(%v)", tt.name)) {
				return
			}
			assert.Equalf(t, tt.expectedNumOfCores, numOfCores, "num of cores")
			assert.Equalf(t, tt.expectedNumOfSockets, numOfSockets, "num of sockets")
			for node := range nodes {
				assert.Equalf(t, tt.expectedNodes[node].Id, nodes[node].Id, "node id")
				for core := range nodes[node].Cores {
					assert.Equalf(t, tt.expectedNodes[node].Cores[core].Id, nodes[node].Cores[core].Id, "core id")
					assert.Equalf(t, tt.expectedNodes[node].Cores[core].Threads, nodes[node].Cores[core].Threads, "threads")
				}
			}
		})
	}
}

func genbuffer(infos ...SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) []byte {
	var buffer []byte
	for _, info := range infos {
		buffer = append(buffer, structToBytes(info)...)
	}
	return buffer
}

func createProcessorRelationships(cpus []int) []byte {
	groups := CpusToGroupAffinity(cpus)
	grouplen := len(groups)
	groupAffinities := make([]GROUP_AFFINITY, 0, grouplen)
	for _, group := range groups {
		groupAffinities = append(groupAffinities, *group)
	}
	return genbuffer(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX{
		Relationship: uint32(RelationProcessorCore),
		Size:         uint32(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX_SIZE + PROCESSOR_RELATIONSHIP_SIZE + (GROUP_AFFINITY_SIZE * grouplen)),
		data: PROCESSOR_RELATIONSHIP{
			Flags:           0,
			EfficiencyClass: 0,
			Reserved:        [20]byte{},
			GroupCount:      uint16(grouplen),
			GroupMasks:      groupAffinities,
		},
	}, SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX{
		Relationship: uint32(RelationNumaNode),
		Size:         uint32(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX_SIZE + NUMA_NODE_RELATIONSHIP_SIZE + (GROUP_AFFINITY_SIZE * grouplen)),
		data: NUMA_NODE_RELATIONSHIP{
			NodeNumber: 0,
			Reserved:   [18]byte{},
			GroupCount: uint16(grouplen),
			GroupMasks: groupAffinities,
		}}, SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX{
		Relationship: uint32(RelationProcessorPackage),
		Size:         uint32(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX_SIZE + PROCESSOR_RELATIONSHIP_SIZE + (GROUP_AFFINITY_SIZE * grouplen)),
		data: PROCESSOR_RELATIONSHIP{
			Flags:           0,
			EfficiencyClass: 0,
			Reserved:        [20]byte{},
			GroupCount:      uint16(grouplen),
			GroupMasks:      groupAffinities,
		},
	})
}

const SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX_SIZE = 8
const PROCESSOR_RELATIONSHIP_SIZE = 24
const NUMA_NODE_RELATIONSHIP_SIZE = 24
const GROUP_AFFINITY_SIZE = int(unsafe.Sizeof(GROUP_AFFINITY{})) // this one is known at compile time

func structToBytes(info SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX) []byte {
	var pri []byte = (*(*[SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX_SIZE]byte)(unsafe.Pointer(&info)))[:SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX_SIZE]

	switch info.data.(type) {
	case PROCESSOR_RELATIONSHIP:
		rel := info.data.(PROCESSOR_RELATIONSHIP)
		var prBytes []byte = (*(*[PROCESSOR_RELATIONSHIP_SIZE]byte)(unsafe.Pointer(&rel)))[:PROCESSOR_RELATIONSHIP_SIZE]
		pri = append(pri, prBytes...)

		groupAffinities := rel.GroupMasks.([]GROUP_AFFINITY)

		for _, groupAffinity := range groupAffinities {
			var groupByte []byte = (*(*[GROUP_AFFINITY_SIZE]byte)(unsafe.Pointer(&groupAffinity)))[:]
			pri = append(pri, groupByte...)
		}
	case NUMA_NODE_RELATIONSHIP:
		numa := info.data.(NUMA_NODE_RELATIONSHIP)
		var nameBytes []byte = (*(*[NUMA_NODE_RELATIONSHIP_SIZE]byte)(unsafe.Pointer(&numa)))[:NUMA_NODE_RELATIONSHIP_SIZE]
		pri = append(pri, nameBytes...)

		groupAffinities := numa.GroupMasks.([]GROUP_AFFINITY)

		for _, groupAffinity := range groupAffinities {
			var groupByte []byte = (*(*[GROUP_AFFINITY_SIZE]byte)(unsafe.Pointer(&groupAffinity)))[:]
			pri = append(pri, groupByte...)
		}
	}

	return pri
}
@@ -177,12 +177,20 @@ func (p *perfCounterNodeStatsClient) getMachineInfo() (*cadvisorapi.MachineInfo, error) {
 		return nil, err
 	}
 
+	numOfPysicalCores, numOfSockets, topology, err := processorInfo(RelationAll)
+	if err != nil {
+		return nil, err
+	}
+
 	return &cadvisorapi.MachineInfo{
 		NumCores:         ProcessorCount(),
-		MemoryCapacity: p.nodeInfo.memoryPhysicalCapacityBytes,
-		MachineID:      hostname,
-		SystemUUID:     systemUUID,
-		BootID:         bootId,
+		NumSockets:       numOfSockets,
+		NumPhysicalCores: numOfPysicalCores,
+		MemoryCapacity:   p.nodeInfo.memoryPhysicalCapacityBytes,
+		MachineID:        hostname,
+		SystemUUID:       systemUUID,
+		BootID:           bootId,
+		Topology:         topology,
 	}, nil
 }