Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)
Update TopologyManager.GetTopologyHints() to take pointers
Previously, these functions took full Pod and Container objects by value, copying them unnecessarily on every call. This commit updates them to take pointers instead.
parent adaa58b6cb
commit bc686ea27b
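The motivation is the standard Go value-vs-pointer trade-off: v1.Pod is a large struct, so passing it by value copies the whole spec and status on every hint query, while a pointer costs one machine word. Below is a minimal, self-contained sketch of the before/after calling convention; the Pod/Container stand-in types and the hintsByValue/hintsByPointer helpers are illustrative only, not code from this commit:

package main

import "fmt"

// Stand-ins for v1.Pod and v1.Container, so the sketch runs without
// importing k8s.io/api. Real Pods are far larger than this.
type Container struct{ Name string }
type Pod struct {
	Name       string
	Containers []Container
}

// Old style: the entire Pod and Container structs are copied at the call site.
func hintsByValue(pod Pod, container Container) map[string][]string {
	return map[string][]string{container.Name: {pod.Name}}
}

// New style: only two pointers are copied, and helpers that already take
// pointers can be called without first taking addresses of local copies.
func hintsByPointer(pod *Pod, container *Container) map[string][]string {
	return map[string][]string{container.Name: {pod.Name}}
}

func main() {
	pod := Pod{Name: "p", Containers: []Container{{Name: "c"}}}
	fmt.Println(hintsByValue(pod, pod.Containers[0]))
	fmt.Println(hintsByPointer(&pod, &pod.Containers[0]))
}

The knock-on effect is visible throughout the diff: internal calls such as p.guaranteedCPUs(&pod, &container) simplify to p.guaranteedCPUs(pod, container), and tests construct arguments as &v1.Pod{} and &v1.Container{}.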
@@ -71,7 +71,7 @@ type Manager interface {
     // GetTopologyHints implements the topologymanager.HintProvider Interface
     // and is consulted to achieve NUMA aware resource alignment among this
     // and other resource controllers.
-    GetTopologyHints(v1.Pod, v1.Container) map[string][]topologymanager.TopologyHint
+    GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint
 }

 type manager struct {
@@ -298,7 +298,7 @@ func (m *manager) State() state.Reader {
     return m.state
 }

-func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     // Garbage collect any stranded resources before providing TopologyHints
     m.removeStaleState()
     // Delegate to active policy
@@ -112,7 +112,7 @@ func (p *mockPolicy) RemoveContainer(s state.State, podUID string, containerName
     return p.err
 }

-func (p *mockPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (p *mockPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     return nil
 }

@@ -50,7 +50,7 @@ func (m *fakeManager) RemoveContainer(containerID string) error {
     return nil
 }

-func (m *fakeManager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     klog.Infof("[fake cpumanager] Get Topology Hints")
     return map[string][]topologymanager.TopologyHint{}
 }
@@ -33,5 +33,5 @@ type Policy interface {
     // GetTopologyHints implements the topologymanager.HintProvider Interface
     // and is consulted to achieve NUMA aware resource alignment among this
     // and other resource controllers.
-    GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint
+    GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint
 }
@@ -52,6 +52,6 @@ func (p *nonePolicy) RemoveContainer(s state.State, podUID string, containerName
     return nil
 }

-func (p *nonePolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (p *nonePolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     return nil
 }
@@ -276,7 +276,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
     return int(cpuQuantity.Value())
 }

-func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     // If there are no CPU resources requested for this container, we do not
     // generate any topology hints.
     if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok {
@@ -284,7 +284,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.
     }

     // Get a count of how many guaranteed CPUs have been requested.
-    requested := p.guaranteedCPUs(&pod, &container)
+    requested := p.guaranteedCPUs(pod, container)

     // If there are no guaranteed CPUs being requested, we do not generate
     // any topology hints. This can happen, for example, because init
@@ -265,7 +265,7 @@ func TestGetTopologyHints(t *testing.T) {
         sourcesReady: &sourcesReadyStub{},
     }

-    hints := m.GetTopologyHints(tc.pod, tc.container)[string(v1.ResourceCPU)]
+    hints := m.GetTopologyHints(&tc.pod, &tc.container)[string(v1.ResourceCPU)]
     if len(tc.expectedHints) == 0 && len(hints) == 0 {
         continue
     }
@@ -65,7 +65,7 @@ func (h *ManagerStub) GetWatcherHandler() cache.PluginHandler {
 }

 // GetTopologyHints returns an empty TopologyHint map
-func (h *ManagerStub) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (h *ManagerStub) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     return map[string][]topologymanager.TopologyHint{}
 }

@@ -27,7 +27,7 @@ import (
 // GetTopologyHints implements the TopologyManager HintProvider Interface which
 // ensures the Device Manager is consulted when Topology Aware Hints for each
 // container are created.
-func (m *ManagerImpl) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     // Garbage collect any stranded device resources before providing TopologyHints
     m.updateAllocatedDevices(m.activePods())

@@ -413,7 +413,7 @@ func TestGetTopologyHints(t *testing.T) {
         }
     }

-    hints := m.GetTopologyHints(*pod, pod.Spec.Containers[0])
+    hints := m.GetTopologyHints(pod, &pod.Spec.Containers[0])

     for r := range tc.expectedHints {
         sort.SliceStable(hints[r], func(i, j int) bool {
@@ -67,7 +67,7 @@ type Manager interface {

     // TopologyManager HintProvider provider indicates the Device Manager implements the Topology Manager Interface
     // and is consulted to make Topology aware resource alignments
-    GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint
+    GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint
 }

 // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.
@@ -858,7 +858,7 @@ func testPolicyMerge(policy Policy, tcases []policyMergeTestCase, t *testing.T)
     for _, tc := range tcases {
         var providersHints []map[string][]TopologyHint
         for _, provider := range tc.hp {
-            hints := provider.GetTopologyHints(v1.Pod{}, v1.Container{})
+            hints := provider.GetTopologyHints(&v1.Pod{}, &v1.Container{})
             providersHints = append(providersHints, hints)
         }

@@ -76,7 +76,7 @@ type HintProvider interface {
     // this function for each hint provider, and merges the hints to produce
     // a consensus "best" hint. The hint providers may subsequently query the
     // topology manager to influence actual resource assignment.
-    GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]TopologyHint
+    GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint
 }

 //Store interface is to allow Hint Providers to retrieve pod affinity
@@ -164,7 +164,7 @@ func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint
     return m.podTopologyHints[podUID][containerName]
 }

-func (m *manager) accumulateProvidersHints(pod v1.Pod, container v1.Container) (providersHints []map[string][]TopologyHint) {
+func (m *manager) accumulateProvidersHints(pod *v1.Pod, container *v1.Container) (providersHints []map[string][]TopologyHint) {
     // Loop through all hint providers and save an accumulated list of the
     // hints returned by each hint provider.
     for _, provider := range m.hintProviders {
@@ -177,7 +177,7 @@ func (m *manager) accumulateProvidersHints(pod v1.Pod, container v1.Container) (
 }

 // Collect Hints from hint providers and pass to policy to retrieve the best one.
-func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) (TopologyHint, bool) {
+func (m *manager) calculateAffinity(pod *v1.Pod, container *v1.Container) (TopologyHint, bool) {
     providersHints := m.accumulateProvidersHints(pod, container)
     bestHint, admit := m.policy.Merge(providersHints)
     klog.Infof("[topologymanager] ContainerTopologyHint: %v", bestHint)
@@ -221,7 +221,7 @@ func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitR
     hints := make(map[string]TopologyHint)

     for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
-        result, admit := m.calculateAffinity(*pod, container)
+        result, admit := m.calculateAffinity(pod, &container)
         if !admit {
             return lifecycle.PodAdmitResult{
                 Message: "Resources cannot be allocated with Topology locality",
@@ -77,7 +77,7 @@ type mockHintProvider struct {
     th map[string][]TopologyHint
 }

-func (m *mockHintProvider) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]TopologyHint {
+func (m *mockHintProvider) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint {
     return m.th
 }

@@ -223,7 +223,7 @@ func TestAccumulateProvidersHints(t *testing.T) {
     mngr := manager{
         hintProviders: tc.hp,
     }
-    actual := mngr.accumulateProvidersHints(v1.Pod{}, v1.Container{})
+    actual := mngr.accumulateProvidersHints(&v1.Pod{}, &v1.Container{})
     if !reflect.DeepEqual(actual, tc.expected) {
         t.Errorf("Test Case %s: Expected NUMANodeAffinity in result to be %v, got %v", tc.name, tc.expected, actual)
     }
@@ -342,7 +342,7 @@ func TestCalculateAffinity(t *testing.T) {
     mngr := manager{}
     mngr.policy = &mockPolicy{}
     mngr.hintProviders = tc.hp
-    mngr.calculateAffinity(v1.Pod{}, v1.Container{})
+    mngr.calculateAffinity(&v1.Pod{}, &v1.Container{})
     actual := mngr.policy.(*mockPolicy).ph
     if !reflect.DeepEqual(tc.expected, actual) {
         t.Errorf("Test Case: %s", tc.name)