From 212c4c485172ebda99cd639969f1cc1c32ba6ba7 Mon Sep 17 00:00:00 2001
From: likakuli <1154584512@qq.com>
Date: Thu, 14 Sep 2023 17:13:04 +0800
Subject: [PATCH] feat: revert #103979 as it is a duplicate

Signed-off-by: likakuli <1154584512@qq.com>
---
 pkg/kubelet/cm/cpumanager/cpu_manager.go      |  27 +---
 pkg/kubelet/cm/cpumanager/cpu_manager_test.go |   4 +-
 .../cm/cpumanager/topology_hints_test.go      |   6 -
 pkg/kubelet/cm/devicemanager/manager.go       |  24 +---
 .../cm/devicemanager/topology_hints.go        |   8 --
 .../cm/memorymanager/memory_manager.go        |  31 +----
 .../cm/memorymanager/memory_manager_test.go   | 128 +-----------------
 7 files changed, 11 insertions(+), 217 deletions(-)

diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager.go b/pkg/kubelet/cm/cpumanager/cpu_manager.go
index b17af7ff8fa..0986e57bea1 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -146,9 +146,6 @@ type manager struct {
 	// allocatableCPUs is the set of online CPUs as reported by the system,
 	// and available for allocation, minus the reserved set
 	allocatableCPUs cpuset.CPUSet
-
-	// pendingAdmissionPod contain the pod during the admission phase
-	pendingAdmissionPod *v1.Pod
 }
 
 var _ Manager = &manager{}
@@ -254,10 +251,6 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
 }
 
 func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(p)
-
 	// Garbage collect any stranded resources before allocating CPUs.
 	m.removeStaleState()
 
@@ -326,9 +319,6 @@ func (m *manager) State() state.Reader {
 }
 
 func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -336,9 +326,6 @@ func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[str
 }
 
 func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -375,14 +362,11 @@ func (m *manager) removeStaleState() {
 	defer m.Unlock()
 
 	// Get the list of active pods.
-	activeAndAdmittedPods := m.activePods()
-	if m.pendingAdmissionPod != nil {
-		activeAndAdmittedPods = append(activeAndAdmittedPods, m.pendingAdmissionPod)
-	}
+	activePods := m.activePods()
 
 	// Build a list of (podUID, containerName) pairs for all containers in all active Pods.
 	activeContainers := make(map[string]map[string]struct{})
-	for _, pod := range activeAndAdmittedPods {
+	for _, pod := range activePods {
 		activeContainers[string(pod.UID)] = make(map[string]struct{})
 		for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 			activeContainers[string(pod.UID)][container.Name] = struct{}{}
@@ -554,10 +538,3 @@ func (m *manager) GetExclusiveCPUs(podUID, containerName string) cpuset.CPUSet {
 func (m *manager) GetCPUAffinity(podUID, containerName string) cpuset.CPUSet {
 	return m.state.GetCPUSetOrDefault(podUID, containerName)
 }
-
-func (m *manager) setPodPendingAdmission(pod *v1.Pod) {
-	m.Lock()
-	defer m.Unlock()
-
-	m.pendingAdmissionPod = pod
-}
diff --git a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
index 0630032c511..0e4767b6cc9 100644
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -325,7 +325,7 @@ func TestCPUManagerAdd(t *testing.T) {
 
 		pod := makePod("fakePod", "fakeContainer", "2", "2")
 		container := &pod.Spec.Containers[0]
-		mgr.activePods = func() []*v1.Pod { return nil }
+		mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} }
 
 		err := mgr.Allocate(pod, container)
 		if !reflect.DeepEqual(err, testCase.expAllocateErr) {
@@ -1321,7 +1321,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
 
 		pod := makePod("fakePod", "fakeContainer", "2", "2")
 		container := &pod.Spec.Containers[0]
-		mgr.activePods = func() []*v1.Pod { return nil }
+		mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} }
 
 		err := mgr.Allocate(pod, container)
 		if !reflect.DeepEqual(err, testCase.expAllocateErr) {
diff --git a/pkg/kubelet/cm/cpumanager/topology_hints_test.go b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
index dc8f0d49834..322f45baa77 100644
--- a/pkg/kubelet/cm/cpumanager/topology_hints_test.go
+++ b/pkg/kubelet/cm/cpumanager/topology_hints_test.go
@@ -245,11 +245,6 @@ func TestGetTopologyHints(t *testing.T) {
 		if len(tc.expectedHints) == 0 && len(hints) == 0 {
 			continue
 		}
-
-		if m.pendingAdmissionPod == nil {
-			t.Errorf("The pendingAdmissionPod should point to the current pod after the call to GetTopologyHints()")
-		}
-
 		sort.SliceStable(hints, func(i, j int) bool {
 			return hints[i].LessThan(hints[j])
 		})
@@ -298,7 +293,6 @@ func TestGetPodTopologyHints(t *testing.T) {
 		if len(tc.expectedHints) == 0 && len(podHints) == 0 {
 			continue
 		}
-
 		sort.SliceStable(podHints, func(i, j int) bool {
 			return podHints[i].LessThan(podHints[j])
 		})
diff --git a/pkg/kubelet/cm/devicemanager/manager.go b/pkg/kubelet/cm/devicemanager/manager.go
index 32a618e7221..c6ccb4e7f83 100644
--- a/pkg/kubelet/cm/devicemanager/manager.go
+++ b/pkg/kubelet/cm/devicemanager/manager.go
@@ -101,9 +101,6 @@ type ManagerImpl struct {
 	// init containers.
 	devicesToReuse PodReusableDevices
 
-	// pendingAdmissionPod contain the pod during the admission phase
-	pendingAdmissionPod *v1.Pod
-
 	// containerMap provides a mapping from (pod, container) -> containerID
 	// for all containers in a pod. Used to detect pods running across a restart
 	containerMap containermap.ContainerMap
@@ -364,10 +361,6 @@ func (m *ManagerImpl) Stop() error {
 // Allocate is the call that you can use to allocate a set of devices
 // from the registered device plugins.
 func (m *ManagerImpl) Allocate(pod *v1.Pod, container *v1.Container) error {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	if _, ok := m.devicesToReuse[string(pod.UID)]; !ok {
 		m.devicesToReuse[string(pod.UID)] = make(map[string]sets.Set[string])
 	}
@@ -548,20 +541,14 @@ func (m *ManagerImpl) getCheckpoint() (checkpoint.DeviceManagerCheckpoint, error
 
 // UpdateAllocatedDevices frees any Devices that are bound to terminated pods.
 func (m *ManagerImpl) UpdateAllocatedDevices() {
+	activePods := m.activePods()
 	if !m.sourcesReady.AllReady() {
 		return
 	}
-
 	m.mutex.Lock()
 	defer m.mutex.Unlock()
-
-	activeAndAdmittedPods := m.activePods()
-	if m.pendingAdmissionPod != nil {
-		activeAndAdmittedPods = append(activeAndAdmittedPods, m.pendingAdmissionPod)
-	}
-
 	podsToBeRemoved := m.podDevices.pods()
-	for _, pod := range activeAndAdmittedPods {
+	for _, pod := range activePods {
 		podsToBeRemoved.Delete(string(pod.UID))
 	}
 	if len(podsToBeRemoved) <= 0 {
@@ -1171,13 +1158,6 @@ func (m *ManagerImpl) ShouldResetExtendedResourceCapacity() bool {
 	return len(checkpoints) == 0
 }
 
-func (m *ManagerImpl) setPodPendingAdmission(pod *v1.Pod) {
-	m.mutex.Lock()
-	defer m.mutex.Unlock()
-
-	m.pendingAdmissionPod = pod
-}
-
 func (m *ManagerImpl) isContainerAlreadyRunning(podUID, cntName string) bool {
 	cntID, err := m.containerMap.GetContainerID(podUID, cntName)
 	if err != nil {
diff --git a/pkg/kubelet/cm/devicemanager/topology_hints.go b/pkg/kubelet/cm/devicemanager/topology_hints.go
index 16630daad81..c7fd4a790e0 100644
--- a/pkg/kubelet/cm/devicemanager/topology_hints.go
+++ b/pkg/kubelet/cm/devicemanager/topology_hints.go
@@ -31,10 +31,6 @@ import (
 // ensures the Device Manager is consulted when Topology Aware Hints for each
 // container are created.
 func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded device resources before providing TopologyHints
 	m.UpdateAllocatedDevices()
 
@@ -87,10 +83,6 @@ func (m *ManagerImpl) GetTopologyHints(pod *v1.Pod, container *v1.Container) map
 // GetPodTopologyHints implements the topologymanager.HintProvider Interface which
 // ensures the Device Manager is consulted when Topology Aware Hints for Pod are created.
 func (m *ManagerImpl) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded device resources before providing TopologyHints
 	m.UpdateAllocatedDevices()
 
diff --git a/pkg/kubelet/cm/memorymanager/memory_manager.go b/pkg/kubelet/cm/memorymanager/memory_manager.go
index 7acc5f54229..831afb193ea 100644
--- a/pkg/kubelet/cm/memorymanager/memory_manager.go
+++ b/pkg/kubelet/cm/memorymanager/memory_manager.go
@@ -126,9 +126,6 @@ type manager struct {
 
 	// allocatableMemory holds the allocatable memory for each NUMA node
 	allocatableMemory []state.Block
-
-	// pendingAdmissionPod contain the pod during the admission phase
-	pendingAdmissionPod *v1.Pod
 }
 
 var _ Manager = &manager{}
@@ -242,10 +239,6 @@ func (m *manager) GetMemoryNUMANodes(pod *v1.Pod, container *v1.Container) sets.
 
 // Allocate is called to pre-allocate memory resources during Pod admission.
 func (m *manager) Allocate(pod *v1.Pod, container *v1.Container) error {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded resources before allocation
 	m.removeStaleState()
 
@@ -284,10 +277,6 @@ func (m *manager) State() state.Reader {
 
 // GetPodTopologyHints returns the topology hints for the topology manager
 func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -296,10 +285,6 @@ func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.
 
 // GetTopologyHints returns the topology hints for the topology manager
 func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
-	// The pod is during the admission phase. We need to save the pod to avoid it
-	// being cleaned before the admission ended
-	m.setPodPendingAdmission(pod)
-
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -322,15 +307,12 @@ func (m *manager) removeStaleState() {
 	m.Lock()
 	defer m.Unlock()
 
-	// Get the list of admitted and active pods.
-	activeAndAdmittedPods := m.activePods()
-	if m.pendingAdmissionPod != nil {
-		activeAndAdmittedPods = append(activeAndAdmittedPods, m.pendingAdmissionPod)
-	}
+	// Get the list of active pods.
+	activePods := m.activePods()
 
 	// Build a list of (podUID, containerName) pairs for all containers in all active Pods.
 	activeContainers := make(map[string]map[string]struct{})
-	for _, pod := range activeAndAdmittedPods {
+	for _, pod := range activePods {
 		activeContainers[string(pod.UID)] = make(map[string]struct{})
 		for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 			activeContainers[string(pod.UID)][container.Name] = struct{}{}
@@ -464,10 +446,3 @@ func (m *manager) GetAllocatableMemory() []state.Block {
 func (m *manager) GetMemory(podUID, containerName string) []state.Block {
 	return m.state.GetMemoryBlocks(podUID, containerName)
 }
-
-func (m *manager) setPodPendingAdmission(pod *v1.Pod) {
-	m.Lock()
-	defer m.Unlock()
-
-	m.pendingAdmissionPod = pod
-}
diff --git a/pkg/kubelet/cm/memorymanager/memory_manager_test.go b/pkg/kubelet/cm/memorymanager/memory_manager_test.go
index 5b5a3e1999e..44c654be157 100644
--- a/pkg/kubelet/cm/memorymanager/memory_manager_test.go
+++ b/pkg/kubelet/cm/memorymanager/memory_manager_test.go
@@ -2019,129 +2019,6 @@ func TestNewManager(t *testing.T) {
 
 func TestGetTopologyHints(t *testing.T) {
 	testCases := []testMemoryManager{
-		{
-			description: "Successful hint generation",
-			policyName: policyTypeStatic,
-			machineInfo: returnMachineInfo(),
-			reserved: systemReservedMemory{
-				0: map[v1.ResourceName]uint64{
-					v1.ResourceMemory: 1 * gb,
-				},
-				1: map[v1.ResourceName]uint64{
-					v1.ResourceMemory: 1 * gb,
-				},
-			},
-			assignments: state.ContainerMemoryAssignments{
-				"fakePod1": map[string][]state.Block{
-					"fakeContainer1": {
-						{
-							NUMAAffinity: []int{0},
-							Type: v1.ResourceMemory,
-							Size: 1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type: hugepages1Gi,
-							Size: 1 * gb,
-						},
-					},
-					"fakeContainer2": {
-						{
-							NUMAAffinity: []int{0},
-							Type: v1.ResourceMemory,
-							Size: 1 * gb,
-						},
-						{
-							NUMAAffinity: []int{0},
-							Type: hugepages1Gi,
-							Size: 1 * gb,
-						},
-					},
-				},
-			},
-			machineState: state.NUMANodeMap{
-				0: &state.NUMANodeState{
-					Cells: []int{0},
-					NumberOfAssignments: 4,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable: 9 * gb,
-							Free: 7 * gb,
-							Reserved: 2 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize: 10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable: 5 * gb,
-							Free: 3 * gb,
-							Reserved: 2 * gb,
-							SystemReserved: 0 * gb,
-							TotalMemSize: 5 * gb,
-						},
-					},
-				},
-				1: &state.NUMANodeState{
-					Cells: []int{1},
-					NumberOfAssignments: 0,
-					MemoryMap: map[v1.ResourceName]*state.MemoryTable{
-						v1.ResourceMemory: {
-							Allocatable: 9 * gb,
-							Free: 9 * gb,
-							Reserved: 0 * gb,
-							SystemReserved: 1 * gb,
-							TotalMemSize: 10 * gb,
-						},
-						hugepages1Gi: {
-							Allocatable: 5 * gb,
-							Free: 5 * gb,
-							Reserved: 0,
-							SystemReserved: 0,
-							TotalMemSize: 5 * gb,
-						},
-					},
-				},
-			},
-			expectedError: nil,
-			expectedHints: map[string][]topologymanager.TopologyHint{
-				string(v1.ResourceMemory): {
-					{
-						NUMANodeAffinity: newNUMAAffinity(0),
-						Preferred: true,
-					},
-					{
-						NUMANodeAffinity: newNUMAAffinity(1),
-						Preferred: true,
-					},
-				},
-				string(hugepages1Gi): {
-					{
-						NUMANodeAffinity: newNUMAAffinity(0),
-						Preferred: true,
-					},
-					{
-						NUMANodeAffinity: newNUMAAffinity(1),
-						Preferred: true,
-					},
-				},
-			},
-			activePods: []*v1.Pod{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						UID: "fakePod1",
-					},
-					Spec: v1.PodSpec{
-						Containers: []v1.Container{
-							{
-								Name: "fakeContainer1",
-							},
-							{
-								Name: "fakeContainer2",
-							},
-						},
-					},
-				},
-			},
-		},
 		{
 			description: "Successful hint generation",
 			policyName: policyTypeStatic,
@@ -2255,7 +2132,6 @@ func TestGetTopologyHints(t *testing.T) {
 					},
 				},
 			},
-			activePods: []*v1.Pod{},
 		},
 	}
 
@@ -2268,14 +2144,14 @@ func TestGetTopologyHints(t *testing.T) {
 			containerRuntime: mockRuntimeService{
 				err: nil,
 			},
-			activePods: func() []*v1.Pod { return testCase.activePods },
+			activePods: func() []*v1.Pod { return nil },
 			podStatusProvider: mockPodStatusProvider{},
 		}
 		mgr.sourcesReady = &sourcesReadyStub{}
 		mgr.state.SetMachineState(testCase.machineState.Clone())
 		mgr.state.SetMemoryAssignments(testCase.assignments.Clone())
 
-		pod := getPod("fakePod2", "fakeContainer1", requirementsGuaranteed)
+		pod := getPod("fakePod1", "fakeContainer1", requirementsGuaranteed)
 		container := &pod.Spec.Containers[0]
 		hints := mgr.GetTopologyHints(pod, container)
 		if !reflect.DeepEqual(hints, testCase.expectedHints) {
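
Illustration (not part of the patch itself): after this revert, all three managers garbage-collect stale state from activePods() alone, as the removeStaleState() and UpdateAllocatedDevices() hunks above show; the pod held in pendingAdmissionPod is no longer appended to that list. The Go sketch below is a minimal, self-contained rendering of that cleanup pattern under simplified assumptions: the Pod, Container, and staleStateCleaner types are hypothetical stand-ins for illustration, not the kubelet's actual API.

package main

import "fmt"

// Simplified stand-ins for the kubelet types that appear in the hunks above.
type Container struct{ Name string }

type Pod struct {
	UID            string
	InitContainers []Container
	Containers     []Container
}

// staleStateCleaner mirrors the shape of the managers' cleanup: it keeps
// per-(podUID, containerName) state and drops entries that no longer belong
// to any active pod.
type staleStateCleaner struct {
	activePods func() []*Pod                  // injected, like the managers' activePods callback
	state      map[string]map[string]struct{} // podUID -> containerName -> cached assignment (simplified)
}

// removeStaleState deletes state for every (podUID, containerName) pair that
// is not present in the current activePods() list - with no extra
// pending-admission pod appended, which is the behaviour this patch restores.
func (c *staleStateCleaner) removeStaleState() {
	activePods := c.activePods()

	// Build a set of (podUID, containerName) pairs for all containers in all active pods.
	activeContainers := make(map[string]map[string]struct{})
	for _, pod := range activePods {
		activeContainers[pod.UID] = make(map[string]struct{})
		for _, container := range append(pod.InitContainers, pod.Containers...) {
			activeContainers[pod.UID][container.Name] = struct{}{}
		}
	}

	// Walk the cached state and remove anything not in the active set.
	for podUID, containers := range c.state {
		for containerName := range containers {
			if _, ok := activeContainers[podUID][containerName]; !ok {
				delete(c.state[podUID], containerName)
			}
		}
		if len(c.state[podUID]) == 0 {
			delete(c.state, podUID)
		}
	}
}

func main() {
	c := &staleStateCleaner{
		activePods: func() []*Pod {
			return []*Pod{{UID: "pod-1", Containers: []Container{{Name: "app"}}}}
		},
		state: map[string]map[string]struct{}{
			"pod-1": {"app": {}},
			"pod-2": {"old": {}}, // belongs to a terminated pod, so it gets collected
		},
	}
	c.removeStaleState()
	fmt.Println(len(c.state)) // prints 1: only pod-1's state survives
}

Consistent with this, the cpu_manager_test.go hunks above now return the pod under test from activePods() (return []*v1.Pod{pod} instead of return nil), since without the pending-admission bookkeeping, state is only retained across removeStaleState() for pods returned by activePods().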