Merge pull request #87758 from klueska/upstream-cleanup-topology-manager

Cleanup TopologyManager and update policy.Merge()
Kubernetes Prow Robot 2020-02-06 17:46:27 -08:00 committed by GitHub
commit 6ac8f2c70c
23 changed files with 77 additions and 117 deletions
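In short: HintProvider implementations now take `*v1.Pod` and `*v1.Container` instead of values, and `policy.Merge()` returns a plain `bool` admit decision instead of a `lifecycle.PodAdmitResult`; the manager's `Admit()` handler is now the only place that builds the admit result. A minimal, self-contained sketch of that translation (the types below are local stand-ins for illustration, not the real kubelet definitions):

```go
package main

import "fmt"

// Local stand-ins for the kubelet types, for illustration only.
type TopologyHint struct{ Preferred bool }

type PodAdmitResult struct {
	Admit   bool
	Reason  string
	Message string
}

// admitResultFromMerge mirrors what the manager's Admit handler now does with
// the bool returned by policy.Merge(): build the PodAdmitResult in one place.
func admitResultFromMerge(admit bool) PodAdmitResult {
	if !admit {
		return PodAdmitResult{
			Message: "Resources cannot be allocated with Topology locality",
			Reason:  "TopologyAffinityError",
			Admit:   false,
		}
	}
	return PodAdmitResult{Admit: true}
}

func main() {
	fmt.Printf("%+v\n", admitResultFromMerge(false))
}
```

Moving the admit-result construction into the manager is also why the `k8s.io/kubernetes/pkg/kubelet/lifecycle` import disappears from every policy file in the hunks below.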

View File

@@ -71,7 +71,7 @@ type Manager interface {
     // GetTopologyHints implements the topologymanager.HintProvider Interface
     // and is consulted to achieve NUMA aware resource alignment among this
     // and other resource controllers.
-    GetTopologyHints(v1.Pod, v1.Container) map[string][]topologymanager.TopologyHint
+    GetTopologyHints(*v1.Pod, *v1.Container) map[string][]topologymanager.TopologyHint
 }
 type manager struct {
@@ -298,7 +298,7 @@ func (m *manager) State() state.Reader {
     return m.state
 }
-func (m *manager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     // Garbage collect any stranded resources before providing TopologyHints
     m.removeStaleState()
     // Delegate to active policy

View File

@@ -112,7 +112,7 @@ func (p *mockPolicy) RemoveContainer(s state.State, podUID string, containerName
     return p.err
 }
-func (p *mockPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (p *mockPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     return nil
 }

View File

@@ -50,7 +50,7 @@ func (m *fakeManager) RemoveContainer(containerID string) error {
     return nil
 }
-func (m *fakeManager) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (m *fakeManager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     klog.Infof("[fake cpumanager] Get Topology Hints")
     return map[string][]topologymanager.TopologyHint{}
 }

View File

@@ -33,5 +33,5 @@ type Policy interface {
     // GetTopologyHints implements the topologymanager.HintProvider Interface
     // and is consulted to achieve NUMA aware resource alignment among this
     // and other resource controllers.
-    GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint
+    GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint
 }

View File

@@ -52,6 +52,6 @@ func (p *nonePolicy) RemoveContainer(s state.State, podUID string, containerName
     return nil
 }
-func (p *nonePolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (p *nonePolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     return nil
 }

View File

@@ -276,7 +276,7 @@ func (p *staticPolicy) guaranteedCPUs(pod *v1.Pod, container *v1.Container) int
     return int(cpuQuantity.Value())
 }
-func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     // If there are no CPU resources requested for this container, we do not
     // generate any topology hints.
     if _, ok := container.Resources.Requests[v1.ResourceCPU]; !ok {
@@ -284,7 +284,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod v1.Pod, container v1.
     }
     // Get a count of how many guaranteed CPUs have been requested.
-    requested := p.guaranteedCPUs(&pod, &container)
+    requested := p.guaranteedCPUs(pod, container)
     // If there are no guaranteed CPUs being requested, we do not generate
     // any topology hints. This can happen, for example, because init

View File

@@ -265,7 +265,7 @@ func TestGetTopologyHints(t *testing.T) {
             sourcesReady: &sourcesReadyStub{},
         }
-        hints := m.GetTopologyHints(tc.pod, tc.container)[string(v1.ResourceCPU)]
+        hints := m.GetTopologyHints(&tc.pod, &tc.container)[string(v1.ResourceCPU)]
         if len(tc.expectedHints) == 0 && len(hints) == 0 {
             continue
         }

View File

@@ -65,7 +65,7 @@ func (h *ManagerStub) GetWatcherHandler() cache.PluginHandler {
 }
 // GetTopologyHints returns an empty TopologyHint map
-func (h *ManagerStub) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (h *ManagerStub) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     return map[string][]topologymanager.TopologyHint{}
 }

View File

@@ -27,7 +27,7 @@ import (
 // GetTopologyHints implements the TopologyManager HintProvider Interface which
 // ensures the Device Manager is consulted when Topology Aware Hints for each
 // container are created.
-func (m *ManagerImpl) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint {
+func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
     // Garbage collect any stranded device resources before providing TopologyHints
     m.updateAllocatedDevices(m.activePods())

View File

@@ -413,7 +413,7 @@ func TestGetTopologyHints(t *testing.T) {
            }
        }
-       hints := m.GetTopologyHints(*pod, pod.Spec.Containers[0])
+       hints := m.GetTopologyHints(pod, &pod.Spec.Containers[0])
        for r := range tc.expectedHints {
            sort.SliceStable(hints[r], func(i, j int) bool {

View File

@@ -67,7 +67,7 @@ type Manager interface {
     // TopologyManager HintProvider provider indicates the Device Manager implements the Topology Manager Interface
     // and is consulted to make Topology aware resource alignments
-    GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]topologymanager.TopologyHint
+    GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint
 }
 // DeviceRunContainerOptions contains the combined container runtime settings to consume its allocated devices.

View File

@@ -19,7 +19,6 @@ package topologymanager
 import (
     "k8s.io/klog"
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
-    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 )
 // Policy interface for Topology Manager Pod Admit Result
@@ -28,7 +27,7 @@ type Policy interface {
     Name() string
     // Returns a merged TopologyHint based on input from hint providers
     // and a Pod Admit Handler Response based on hints and policy type
-    Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult)
+    Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool)
 }
 // Merge a TopologyHints permutation to a single hint by performing a bitwise-AND

View File

@@ -16,10 +16,6 @@ limitations under the License.
 package topologymanager
-import (
-    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
-)
 type bestEffortPolicy struct {
     //List of NUMA Nodes available on the underlying machine
     numaNodes []int
@@ -39,13 +35,11 @@ func (p *bestEffortPolicy) Name() string {
     return PolicyBestEffort
 }
-func (p *bestEffortPolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult {
-    return lifecycle.PodAdmitResult{
-        Admit: true,
-    }
+func (p *bestEffortPolicy) canAdmitPodResult(hint *TopologyHint) bool {
+    return true
 }
-func (p *bestEffortPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) {
+func (p *bestEffortPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
     filteredProvidersHints := filterProvidersHints(providersHints)
     bestHint := mergeFilteredHints(p.numaNodes, filteredProvidersHints)
     admit := p.canAdmitPodResult(&bestHint)

View File

@@ -43,8 +43,8 @@ func TestPolicyBestEffortCanAdmitPodResult(t *testing.T) {
        policy := NewBestEffortPolicy(numaNodes)
        result := policy.(*bestEffortPolicy).canAdmitPodResult(&tc.hint)
-       if result.Admit != tc.expected {
-           t.Errorf("Expected Admit field in result to be %t, got %t", tc.expected, result.Admit)
+       if result != tc.expected {
+           t.Errorf("Expected result to be %t, got %t", tc.expected, result)
        }
    }
 }

View File

@@ -16,10 +16,6 @@ limitations under the License.
 package topologymanager
-import (
-    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
-)
 type nonePolicy struct{}
 var _ Policy = &nonePolicy{}
@@ -36,12 +32,10 @@ func (p *nonePolicy) Name() string {
     return PolicyNone
 }
-func (p *nonePolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult {
-    return lifecycle.PodAdmitResult{
-        Admit: true,
-    }
+func (p *nonePolicy) canAdmitPodResult(hint *TopologyHint) bool {
+    return true
 }
-func (p *nonePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) {
+func (p *nonePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
     return TopologyHint{}, p.canAdmitPodResult(nil)
 }

View File

@@ -17,7 +17,6 @@ limitations under the License.
 package topologymanager
 import (
-    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "testing"
 )
@@ -61,8 +60,8 @@ func TestPolicyNoneCanAdmitPodResult(t *testing.T) {
        policy := NewNonePolicy()
        result := policy.(*nonePolicy).canAdmitPodResult(&tc.hint)
-       if result.Admit != tc.expected {
-           t.Errorf("Expected Admit field in result to be %t, got %t", tc.expected, result.Admit)
+       if result != tc.expected {
+           t.Errorf("Expected result to be %t, got %t", tc.expected, result)
        }
    }
 }
@@ -72,13 +71,13 @@ func TestPolicyNoneMerge(t *testing.T) {
        name string
        providersHints []map[string][]TopologyHint
        expectedHint TopologyHint
-       expectedAdmit lifecycle.PodAdmitResult
+       expectedAdmit bool
    }{
        {
            name: "merged empty providers hints",
            providersHints: []map[string][]TopologyHint{},
            expectedHint: TopologyHint{},
-           expectedAdmit: lifecycle.PodAdmitResult{Admit: true},
+           expectedAdmit: true,
        },
        {
            name: "merge with a single provider with a single preferred resource",
@@ -88,7 +87,7 @@ func TestPolicyNoneMerge(t *testing.T) {
                },
            },
            expectedHint: TopologyHint{},
-           expectedAdmit: lifecycle.PodAdmitResult{Admit: true},
+           expectedAdmit: true,
        },
        {
            name: "merge with a single provider with a single non-preferred resource",
@@ -98,14 +97,14 @@ func TestPolicyNoneMerge(t *testing.T) {
                },
            },
            expectedHint: TopologyHint{},
-           expectedAdmit: lifecycle.PodAdmitResult{Admit: true},
+           expectedAdmit: true,
        },
    }
    for _, tc := range tcases {
        policy := NewNonePolicy()
        result, admit := policy.Merge(tc.providersHints)
-       if !result.IsEqual(tc.expectedHint) || admit.Admit != tc.expectedAdmit.Admit {
+       if !result.IsEqual(tc.expectedHint) || admit != tc.expectedAdmit {
            t.Errorf("Test Case: %s: Expected merge hint to be %v, got %v", tc.name, tc.expectedHint, result)
        }
    }

View File

@@ -16,10 +16,6 @@ limitations under the License.
 package topologymanager
-import (
-    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
-)
 type restrictedPolicy struct {
     bestEffortPolicy
 }
@@ -38,20 +34,14 @@ func (p *restrictedPolicy) Name() string {
     return PolicyRestricted
 }
-func (p *restrictedPolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult {
+func (p *restrictedPolicy) canAdmitPodResult(hint *TopologyHint) bool {
     if !hint.Preferred {
-        return lifecycle.PodAdmitResult{
-            Admit: false,
-            Reason: "Topology Affinity Error",
-            Message: "Resources cannot be allocated with Topology Locality",
-        }
-    }
-    return lifecycle.PodAdmitResult{
-        Admit: true,
+        return false
     }
+    return true
 }
-func (p *restrictedPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) {
+func (p *restrictedPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
     filteredHints := filterProvidersHints(providersHints)
     hint := mergeFilteredHints(p.numaNodes, filteredHints)
     admit := p.canAdmitPodResult(&hint)

View File

@@ -61,17 +61,8 @@ func TestPolicyRestrictedCanAdmitPodResult(t *testing.T) {
        policy := NewRestrictedPolicy(numaNodes)
        result := policy.(*restrictedPolicy).canAdmitPodResult(&tc.hint)
-       if result.Admit != tc.expected {
-           t.Errorf("Expected Admit field in result to be %t, got %t", tc.expected, result.Admit)
-       }
-       if tc.expected == false {
-           if len(result.Reason) == 0 {
-               t.Errorf("Expected Reason field to be not empty")
-           }
-           if len(result.Message) == 0 {
-               t.Errorf("Expected Message field to be not empty")
-           }
+       if result != tc.expected {
+           t.Errorf("Expected result to be %t, got %t", tc.expected, result)
        }
    }
 }

View File

@@ -18,7 +18,6 @@ package topologymanager
 import (
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager/bitmask"
-    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 )
 type singleNumaNodePolicy struct {
@@ -40,17 +39,11 @@ func (p *singleNumaNodePolicy) Name() string {
     return PolicySingleNumaNode
 }
-func (p *singleNumaNodePolicy) canAdmitPodResult(hint *TopologyHint) lifecycle.PodAdmitResult {
+func (p *singleNumaNodePolicy) canAdmitPodResult(hint *TopologyHint) bool {
     if !hint.Preferred {
-        return lifecycle.PodAdmitResult{
-            Admit: false,
-            Reason: "Topology Affinity Error",
-            Message: "Resources cannot be allocated with Topology Locality",
-        }
-    }
-    return lifecycle.PodAdmitResult{
-        Admit: true,
+        return false
     }
+    return true
 }
 // Return hints that have valid bitmasks with exactly one bit set.
@@ -71,7 +64,7 @@ func filterSingleNumaHints(allResourcesHints [][]TopologyHint) [][]TopologyHint
     return filteredResourcesHints
 }
-func (p *singleNumaNodePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) {
+func (p *singleNumaNodePolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
     filteredHints := filterProvidersHints(providersHints)
     // Filter to only include don't cares and hints with a single NUMA node.
     singleNumaHints := filterSingleNumaHints(filteredHints)

View File

@@ -39,17 +39,8 @@ func TestPolicySingleNumaNodeCanAdmitPodResult(t *testing.T) {
        policy := NewSingleNumaNodePolicy(numaNodes)
        result := policy.(*singleNumaNodePolicy).canAdmitPodResult(&tc.hint)
-       if result.Admit != tc.expected {
-           t.Errorf("Expected Admit field in result to be %t, got %t", tc.expected, result.Admit)
-       }
-       if tc.expected == false {
-           if len(result.Reason) == 0 {
-               t.Errorf("Expected Reason field to be not empty")
-           }
-           if len(result.Message) == 0 {
-               t.Errorf("Expected Message field to be not empty")
-           }
+       if result != tc.expected {
+           t.Errorf("Expected result to be %t, got %t", tc.expected, result)
        }
    }
 }

View File

@@ -858,7 +858,7 @@ func testPolicyMerge(policy Policy, tcases []policyMergeTestCase, t *testing.T)
    for _, tc := range tcases {
        var providersHints []map[string][]TopologyHint
        for _, provider := range tc.hp {
-           hints := provider.GetTopologyHints(v1.Pod{}, v1.Container{})
+           hints := provider.GetTopologyHints(&v1.Pod{}, &v1.Container{})
            providersHints = append(providersHints, hints)
        }

View File

@@ -76,7 +76,7 @@ type HintProvider interface {
     // this function for each hint provider, and merges the hints to produce
     // a consensus "best" hint. The hint providers may subsequently query the
     // topology manager to influence actual resource assignment.
-    GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]TopologyHint
+    GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint
 }
 //Store interface is to allow Hint Providers to retrieve pod affinity
@@ -124,7 +124,7 @@ func NewManager(numaNodeInfo cputopology.NUMANodeInfo, topologyPolicyName string
        numaNodes = append(numaNodes, node)
    }
-   if len(numaNodes) > maxAllowableNUMANodes {
+   if topologyPolicyName != PolicyNone && len(numaNodes) > maxAllowableNUMANodes {
        return nil, fmt.Errorf("unsupported on machines with more than %v NUMA Nodes", maxAllowableNUMANodes)
    }
@@ -164,7 +164,7 @@ func (m *manager) GetAffinity(podUID string, containerName string) TopologyHint
     return m.podTopologyHints[podUID][containerName]
 }
-func (m *manager) accumulateProvidersHints(pod v1.Pod, container v1.Container) (providersHints []map[string][]TopologyHint) {
+func (m *manager) accumulateProvidersHints(pod *v1.Pod, container *v1.Container) (providersHints []map[string][]TopologyHint) {
     // Loop through all hint providers and save an accumulated list of the
     // hints returned by each hint provider.
     for _, provider := range m.hintProviders {
@@ -177,7 +177,7 @@ func (m *manager) accumulateProvidersHints(pod v1.Pod, container v1.Container) (
 }
 // Collect Hints from hint providers and pass to policy to retrieve the best one.
-func (m *manager) calculateAffinity(pod v1.Pod, container v1.Container) (TopologyHint, lifecycle.PodAdmitResult) {
+func (m *manager) calculateAffinity(pod *v1.Pod, container *v1.Container) (TopologyHint, bool) {
     providersHints := m.accumulateProvidersHints(pod, container)
     bestHint, admit := m.policy.Merge(providersHints)
     klog.Infof("[topologymanager] ContainerTopologyHint: %v", bestHint)
@@ -194,35 +194,44 @@ func (m *manager) AddContainer(pod *v1.Pod, containerID string) error {
 }
 func (m *manager) RemoveContainer(containerID string) error {
+    klog.Infof("[topologymanager] RemoveContainer - Container ID: %v", containerID)
     podUIDString := m.podMap[containerID]
-    delete(m.podTopologyHints, podUIDString)
     delete(m.podMap, containerID)
-    klog.Infof("[topologymanager] RemoveContainer - Container ID: %v podTopologyHints: %v", containerID, m.podTopologyHints)
+    if _, exists := m.podTopologyHints[podUIDString]; exists {
+        delete(m.podTopologyHints[podUIDString], containerID)
+        if len(m.podTopologyHints[podUIDString]) == 0 {
+            delete(m.podTopologyHints, podUIDString)
+        }
+    }
     return nil
 }
 func (m *manager) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
-    klog.Infof("[topologymanager] Topology Admit Handler")
-    if m.policy.Name() == "none" {
-        klog.Infof("[topologymanager] Skipping calculate topology affinity as policy: none")
-        return lifecycle.PodAdmitResult{
-            Admit: true,
-        }
+    // Unconditionally admit the pod if we are running with the 'none' policy.
+    if m.policy.Name() == PolicyNone {
+        return lifecycle.PodAdmitResult{Admit: true}
     }
+    klog.Infof("[topologymanager] Topology Admit Handler")
     pod := attrs.Pod
-    c := make(map[string]TopologyHint)
+    hints := make(map[string]TopologyHint)
     for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
-        result, admitPod := m.calculateAffinity(*pod, container)
-        if !admitPod.Admit {
-            return admitPod
+        result, admit := m.calculateAffinity(pod, &container)
+        if !admit {
+            return lifecycle.PodAdmitResult{
+                Message: "Resources cannot be allocated with Topology locality",
+                Reason: "TopologyAffinityError",
+                Admit: false,
+            }
        }
-        c[container.Name] = result
+        hints[container.Name] = result
    }
-    m.podTopologyHints[string(pod.UID)] = c
+    m.podTopologyHints[string(pod.UID)] = hints
     klog.Infof("[topologymanager] Topology Affinity for Pod: %v are %v", pod.UID, m.podTopologyHints[string(pod.UID)])
-    return lifecycle.PodAdmitResult{
-        Admit: true,
-    }
+    return lifecycle.PodAdmitResult{Admit: true}
 }
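For out-of-tree readers: any hint provider has to adopt the new pointer-based signature shown in the HintProvider hunk above. A minimal sketch of such a provider (the provider name is made up for illustration; it assumes the kubelet packages in this diff are importable, and any additional methods HintProvider declares in your tree would still need to be implemented):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"
)

// exampleHintProvider is a hypothetical provider used only to show the
// updated method signature introduced by this PR.
type exampleHintProvider struct{}

// GetTopologyHints now receives pointers, matching the updated
// topologymanager.HintProvider interface.
func (p *exampleHintProvider) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
	// Returning an empty map means this provider has no NUMA preference
	// for any resource of the given container.
	return map[string][]topologymanager.TopologyHint{}
}
```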

View File

@@ -77,7 +77,7 @@ type mockHintProvider struct {
     th map[string][]TopologyHint
 }
-func (m *mockHintProvider) GetTopologyHints(pod v1.Pod, container v1.Container) map[string][]TopologyHint {
+func (m *mockHintProvider) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]TopologyHint {
     return m.th
 }
@@ -223,7 +223,7 @@ func TestAccumulateProvidersHints(t *testing.T) {
        mngr := manager{
            hintProviders: tc.hp,
        }
-       actual := mngr.accumulateProvidersHints(v1.Pod{}, v1.Container{})
+       actual := mngr.accumulateProvidersHints(&v1.Pod{}, &v1.Container{})
        if !reflect.DeepEqual(actual, tc.expected) {
            t.Errorf("Test Case %s: Expected NUMANodeAffinity in result to be %v, got %v", tc.name, tc.expected, actual)
        }
@@ -235,9 +235,9 @@ type mockPolicy struct {
     ph []map[string][]TopologyHint
 }
-func (p *mockPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, lifecycle.PodAdmitResult) {
+func (p *mockPolicy) Merge(providersHints []map[string][]TopologyHint) (TopologyHint, bool) {
     p.ph = providersHints
-    return TopologyHint{}, lifecycle.PodAdmitResult{}
+    return TopologyHint{}, true
 }
 func TestCalculateAffinity(t *testing.T) {
@@ -342,7 +342,7 @@ func TestCalculateAffinity(t *testing.T) {
        mngr := manager{}
        mngr.policy = &mockPolicy{}
        mngr.hintProviders = tc.hp
-       mngr.calculateAffinity(v1.Pod{}, v1.Container{})
+       mngr.calculateAffinity(&v1.Pod{}, &v1.Container{})
        actual := mngr.policy.(*mockPolicy).ph
        if !reflect.DeepEqual(tc.expected, actual) {
            t.Errorf("Test Case: %s", tc.name)