Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-14 22:33:34 +00:00)
Cache the discovered topology in the CPUManager instead of MachineInfo
commit 869962fa48 (parent 038e5fad75)
@@ -94,7 +94,7 @@ type manager struct {
 	// and the containerID of their containers
 	podStatusProvider status.PodStatusProvider
 
-	machineInfo *cadvisorapi.MachineInfo
+	topology *topology.CPUTopology
 
 	nodeAllocatableReservation v1.ResourceList
 }
@@ -103,6 +103,7 @@ var _ Manager = &manager{}
 
 // NewManager creates new cpu manager based on provided policy
 func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
+	var topo *topology.CPUTopology
 	var policy Policy
 
 	switch policyName(cpuPolicyName) {
@@ -111,6 +112,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
 		policy = NewNonePolicy()
 
 	case PolicyStatic:
-		topo, err := topology.Discover(machineInfo)
+		var err error
+		topo, err = topology.Discover(machineInfo)
 		if err != nil {
 			return nil, err
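Note why this hunk adds "var err error" and switches to plain assignment: keeping "topo, err := topology.Discover(machineInfo)" inside the case block would declare a new topo that shadows the one declared at the top of NewManager, so the outer variable would still be nil when it is stored into the manager below. A self-contained illustration of that Go pitfall (toy values, not kubelet code):

package main

import "fmt"

func get() (int, error) { return 42, nil }

func main() {
	var topo int

	// Wrong: ":=" in an inner scope declares a NEW topo that shadows
	// the outer one; the outer topo keeps its zero value.
	{
		topo, err := get()
		_, _ = topo, err
	}
	fmt.Println(topo) // 0

	// Right: declare err separately and assign with "=", as the commit
	// does, so the outer topo receives the discovered value.
	{
		var err error
		topo, err = get()
		if err != nil {
			return
		}
	}
	fmt.Println(topo) // 42
}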
@@ -149,7 +151,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
 		policy:                     policy,
 		reconcilePeriod:            reconcilePeriod,
 		state:                      stateImpl,
-		machineInfo:                machineInfo,
+		topology:                   topo,
 		nodeAllocatableReservation: nodeAllocatableReservation,
 	}
 	return manager, nil
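Taken together, these hunks make topology discovery a one-time cost paid in NewManager, with the result cached on the manager instead of the raw cadvisor MachineInfo. A minimal sketch of that construct-time caching pattern, using stand-in types rather than the kubelet's (discover, CPUTopology, and manager below are illustrative only):

package main

import (
	"errors"
	"fmt"
)

// CPUTopology stands in for topology.CPUTopology.
type CPUTopology struct{ NumCPUs, NumSockets int }

// discover stands in for topology.Discover: fallible and not free,
// so it should run once rather than on every query.
func discover(numCores int) (*CPUTopology, error) {
	if numCores <= 0 {
		return nil, errors.New("no CPUs found")
	}
	return &CPUTopology{NumCPUs: numCores, NumSockets: 2}, nil
}

// manager caches the discovered topology; the MachineInfo used to
// produce it no longer needs to be retained.
type manager struct{ topology *CPUTopology }

func newManager(numCores int) (*manager, error) {
	topo, err := discover(numCores) // once, at construction
	if err != nil {
		return nil, err // surfaced early, not on every hint request
	}
	return &manager{topology: topo}, nil
}

func main() {
	m, err := newManager(12)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", *m.topology) // {NumCPUs:12 NumSockets:2}
}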
@@ -20,7 +20,6 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/klog"
 
-	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
@@ -72,26 +71,18 @@ func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[strin
 // bits set as the narrowest matching SocketAffinity with 'Preferred: true', and
 // marking all others with 'Preferred: false'.
 func (m *manager) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint {
-	// Discover topology in order to establish the number
-	// of available CPUs per socket.
-	topo, err := topology.Discover(m.machineInfo)
-	if err != nil {
-		klog.Warningf("[cpu manager] Error discovering topology for TopologyHint generation")
-		return nil
-	}
-
 	// Initialize minAffinity to a full affinity mask.
 	minAffinity, _ := socketmask.NewSocketMask()
 	minAffinity.Fill()
 
 	// Iterate through all combinations of socketMasks and build hints from them.
 	hints := []topologymanager.TopologyHint{}
-	socketmask.IterateSocketMasks(topo.CPUDetails.Sockets().ToSlice(), func(mask socketmask.SocketMask) {
+	socketmask.IterateSocketMasks(m.topology.CPUDetails.Sockets().ToSlice(), func(mask socketmask.SocketMask) {
 		// Check to see if we have enough CPUs available on the current
 		// SocketMask to satisfy the CPU request.
 		numMatching := 0
 		for _, c := range availableCPUs.ToSlice() {
-			if mask.IsSet(topo.CPUDetails[c].SocketID) {
+			if mask.IsSet(m.topology.CPUDetails[c].SocketID) {
 				numMatching++
 			}
 		}
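After this hunk, hint generation reads the cached m.topology instead of re-running discovery on every call, which also removes the mid-function error path. The core of the remaining loop is counting how many available CPUs sit on sockets covered by a candidate mask; a simplified, self-contained version of that step (plain ints and a bitmask instead of the kubelet's cpuset and socketmask types):

package main

import "fmt"

// cpuToSocket is a stand-in for topo.CPUDetails[c].SocketID.
var cpuToSocket = map[int]int{
	0: 0, 1: 0, 2: 0, 6: 0, 7: 0, 8: 0,
	3: 1, 4: 1, 5: 1, 9: 1, 10: 1, 11: 1,
}

// countMatching mirrors the loop in the hunk: for each available CPU,
// check whether its socket's bit is set in the candidate mask.
func countMatching(availableCPUs []int, mask uint64) int {
	numMatching := 0
	for _, c := range availableCPUs {
		if mask&(1<<uint(cpuToSocket[c])) != 0 {
			numMatching++
		}
	}
	return numMatching
}

func main() {
	available := []int{2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
	fmt.Println(countMatching(available, 0b01)) // socket 0 only: 4
	fmt.Println(countMatching(available, 0b10)) // socket 1 only: 6
	fmt.Println(countMatching(available, 0b11)) // both sockets: 10
}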
@@ -23,6 +23,7 @@ import (
 
 	cadvisorapi "github.com/google/cadvisor/info/v1"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
@@ -49,30 +50,36 @@ func TestGetTopologyHints(t *testing.T) {
 	secondSocketMask, _ := socketmask.NewSocketMask(1)
 	crossSocketMask, _ := socketmask.NewSocketMask(0, 1)
 
-	m := manager{
-		policy: &staticPolicy{},
-		machineInfo: &cadvisorapi.MachineInfo{
-			NumCores: 12,
-			Topology: []cadvisorapi.Node{
-				{Id: 0,
-					Cores: []cadvisorapi.Core{
-						{Id: 0, Threads: []int{0, 6}},
-						{Id: 1, Threads: []int{1, 7}},
-						{Id: 2, Threads: []int{2, 8}},
-					},
-				},
-				{Id: 1,
-					Cores: []cadvisorapi.Core{
-						{Id: 0, Threads: []int{3, 9}},
-						{Id: 1, Threads: []int{4, 10}},
-						{Id: 2, Threads: []int{5, 11}},
-					},
+	machineInfo := cadvisorapi.MachineInfo{
+		NumCores: 12,
+		Topology: []cadvisorapi.Node{
+			{Id: 0,
+				Cores: []cadvisorapi.Core{
+					{Id: 0, Threads: []int{0, 6}},
+					{Id: 1, Threads: []int{1, 7}},
+					{Id: 2, Threads: []int{2, 8}},
+				},
+			},
+			{Id: 1,
+				Cores: []cadvisorapi.Core{
+					{Id: 0, Threads: []int{3, 9}},
+					{Id: 1, Threads: []int{4, 10}},
+					{Id: 2, Threads: []int{5, 11}},
 				},
 			},
 		},
+	}
+
+	topology, _ := topology.Discover(&machineInfo)
+
+	m := manager{
+		policy: &staticPolicy{
+			topology: topology,
+		},
 		state: &mockState{
 			defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
 		},
+		topology: topology,
 	}
 
 	tcases := []struct {
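The test now derives its CPUTopology the same way production code does, by running topology.Discover over a hand-built MachineInfo, and injects the result into both the manager and its static policy. The fixture models a 2-socket, 6-core, 12-thread machine: thread siblings {0,6}, {1,7}, {2,8} on socket 0 and {3,9}, {4,10}, {5,11} on socket 1. A quick standalone sanity check of such a fixture (hypothetical shapes, not the kubelet test types):

package main

import "fmt"

// core holds one hyperthread sibling set; node groups cores per socket.
// These mirror the shape of the cadvisor fixture, nothing more.
type core struct{ threads []int }
type node struct {
	id    int
	cores []core
}

func main() {
	nodes := []node{
		{id: 0, cores: []core{{[]int{0, 6}}, {[]int{1, 7}}, {[]int{2, 8}}}},
		{id: 1, cores: []core{{[]int{3, 9}}, {[]int{4, 10}}, {[]int{5, 11}}}},
	}

	// Every CPU id should appear exactly once across all sockets.
	seen := map[int]bool{}
	for _, n := range nodes {
		for _, c := range n.cores {
			for _, t := range c.threads {
				seen[t] = true
			}
		}
	}
	fmt.Println("unique CPUs:", len(seen)) // 12, matching NumCores
}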