Cache the discovered topology in the CPUManager instead of MachineInfo

Kevin Klues 2019-08-27 16:23:07 -05:00
parent 038e5fad75
commit 869962fa48
3 changed files with 31 additions and 31 deletions
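
In short: before this change, generateCPUTopologyHints called topology.Discover on every hint request and could only log a discovery failure; now NewManager discovers the topology once and caches it on the manager struct, where a failure can abort construction. A minimal, self-contained sketch of that before/after pattern, using stand-in types and names rather than the real kubelet code:

// Sketch of the caching pattern this commit introduces. MachineInfo,
// CPUTopology, and discover are simplified stand-ins for
// cadvisorapi.MachineInfo, topology.CPUTopology, and topology.Discover.
package main

import "fmt"

type MachineInfo struct{ NumCores int }

type CPUTopology struct{ NumSockets int }

func discover(mi *MachineInfo) (*CPUTopology, error) {
	return &CPUTopology{NumSockets: 2}, nil
}

type manager struct {
	// topology is discovered once at construction and cached, replacing
	// the old machineInfo field that forced re-discovery on every call.
	topology *CPUTopology
}

func newManager(mi *MachineInfo) (*manager, error) {
	topo, err := discover(mi)
	if err != nil {
		// A discovery failure now fails construction instead of being
		// logged (and swallowed) inside hint generation.
		return nil, err
	}
	return &manager{topology: topo}, nil
}

func (m *manager) generateHints() int {
	// Before: topo, err := discover(m.machineInfo) ran here on every call.
	return m.topology.NumSockets
}

func main() {
	m, err := newManager(&MachineInfo{NumCores: 12})
	if err != nil {
		panic(err)
	}
	fmt.Println(m.generateHints())
}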

pkg/kubelet/cm/cpumanager/cpu_manager.go

@@ -94,7 +94,7 @@ type manager struct {
     // and the containerID of their containers
     podStatusProvider status.PodStatusProvider
-    machineInfo *cadvisorapi.MachineInfo
+    topology *topology.CPUTopology
     nodeAllocatableReservation v1.ResourceList
 }
@@ -103,6 +103,7 @@ var _ Manager = &manager{}
 // NewManager creates new cpu manager based on provided policy
 func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo *cadvisorapi.MachineInfo, nodeAllocatableReservation v1.ResourceList, stateFileDirectory string, affinity topologymanager.Store) (Manager, error) {
+    var topo *topology.CPUTopology
     var policy Policy

     switch policyName(cpuPolicyName) {
@@ -111,6 +112,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
         policy = NewNonePolicy()

     case PolicyStatic:
-        topo, err := topology.Discover(machineInfo)
+        var err error
+        topo, err = topology.Discover(machineInfo)
         if err != nil {
             return nil, err
@@ -149,7 +151,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
         policy: policy,
         reconcilePeriod: reconcilePeriod,
         state: stateImpl,
-        machineInfo: machineInfo,
+        topology: topo,
         nodeAllocatableReservation: nodeAllocatableReservation,
     }
     return manager, nil

pkg/kubelet/cm/cpumanager/topology_hints.go

@@ -20,7 +20,6 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/klog"
-    "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
@@ -72,26 +71,18 @@ func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
 // bits set as the narrowest matching SocketAffinity with 'Preferred: true', and
 // marking all others with 'Preferred: false'.
 func (m *manager) generateCPUTopologyHints(availableCPUs cpuset.CPUSet, request int) []topologymanager.TopologyHint {
-    // Discover topology in order to establish the number
-    // of available CPUs per socket.
-    topo, err := topology.Discover(m.machineInfo)
-    if err != nil {
-        klog.Warningf("[cpu manager] Error discovering topology for TopologyHint generation")
-        return nil
-    }
-
     // Initialize minAffinity to a full affinity mask.
     minAffinity, _ := socketmask.NewSocketMask()
     minAffinity.Fill()

     // Iterate through all combinations of socketMasks and build hints from them.
     hints := []topologymanager.TopologyHint{}
-    socketmask.IterateSocketMasks(topo.CPUDetails.Sockets().ToSlice(), func(mask socketmask.SocketMask) {
+    socketmask.IterateSocketMasks(m.topology.CPUDetails.Sockets().ToSlice(), func(mask socketmask.SocketMask) {
         // Check to see if we have enough CPUs available on the current
         // SocketMask to satisfy the CPU request.
         numMatching := 0
         for _, c := range availableCPUs.ToSlice() {
-            if mask.IsSet(topo.CPUDetails[c].SocketID) {
+            if mask.IsSet(m.topology.CPUDetails[c].SocketID) {
                 numMatching++
             }
         }
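
The loop above keeps every socket combination with enough available CPUs to cover the request and marks the narrowest such combinations as preferred. A toy, runnable rendering of that selection logic, using hypothetical data and plain uint bitmasks in place of socketmask.SocketMask:

// Toy version of the narrowest-mask selection in generateCPUTopologyHints.
// Data and names here are hypothetical, not the kubelet's.
package main

import (
	"fmt"
	"math/bits"
)

func main() {
	cpuSocket := map[int]int{2: 0, 3: 1, 4: 1} // available CPU ID -> socket ID
	request := 2                               // CPUs the container is asking for

	// Enumerate every non-empty combination of sockets {0, 1} as a bitmask
	// and keep those with enough available CPUs to satisfy the request.
	var feasible []uint
	minWidth := bits.UintSize
	for mask := uint(1); mask <= 3; mask++ {
		matching := 0
		for _, socket := range cpuSocket {
			if mask&(1<<uint(socket)) != 0 {
				matching++
			}
		}
		if matching >= request {
			feasible = append(feasible, mask)
			if w := bits.OnesCount(mask); w < minWidth {
				minWidth = w // track the narrowest feasible mask
			}
		}
	}

	// Masks as narrow as the narrowest feasible one get Preferred: true;
	// wider ones are still reported, but with Preferred: false.
	for _, mask := range feasible {
		fmt.Printf("mask=%02b preferred=%v\n", mask, bits.OnesCount(mask) == minWidth)
	}
}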

pkg/kubelet/cm/cpumanager/topology_hints_test.go

@@ -23,6 +23,7 @@ import (
     cadvisorapi "github.com/google/cadvisor/info/v1"
     v1 "k8s.io/api/core/v1"
+    "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/socketmask"
@@ -49,30 +50,36 @@ func TestGetTopologyHints(t *testing.T) {
     secondSocketMask, _ := socketmask.NewSocketMask(1)
     crossSocketMask, _ := socketmask.NewSocketMask(0, 1)

-    m := manager{
-        policy: &staticPolicy{},
-        machineInfo: &cadvisorapi.MachineInfo{
-            NumCores: 12,
-            Topology: []cadvisorapi.Node{
-                {Id: 0,
-                    Cores: []cadvisorapi.Core{
-                        {Id: 0, Threads: []int{0, 6}},
-                        {Id: 1, Threads: []int{1, 7}},
-                        {Id: 2, Threads: []int{2, 8}},
-                    },
-                },
-                {Id: 1,
-                    Cores: []cadvisorapi.Core{
-                        {Id: 0, Threads: []int{3, 9}},
-                        {Id: 1, Threads: []int{4, 10}},
-                        {Id: 2, Threads: []int{5, 11}},
-                    },
-                },
-            },
-        },
+    machineInfo := cadvisorapi.MachineInfo{
+        NumCores: 12,
+        Topology: []cadvisorapi.Node{
+            {Id: 0,
+                Cores: []cadvisorapi.Core{
+                    {Id: 0, Threads: []int{0, 6}},
+                    {Id: 1, Threads: []int{1, 7}},
+                    {Id: 2, Threads: []int{2, 8}},
+                },
+            },
+            {Id: 1,
+                Cores: []cadvisorapi.Core{
+                    {Id: 0, Threads: []int{3, 9}},
+                    {Id: 1, Threads: []int{4, 10}},
+                    {Id: 2, Threads: []int{5, 11}},
+                },
+            },
+        },
+    }
+
+    topology, _ := topology.Discover(&machineInfo)
+
+    m := manager{
+        policy: &staticPolicy{
+            topology: topology,
+        },
         state: &mockState{
             defaultCPUSet: cpuset.NewCPUSet(2, 3, 4, 5, 6, 7, 8, 9, 10, 11),
         },
+        topology: topology,
     }

     tcases := []struct {