From 6d0b6278cd3eafeb1c1f1f440c22ec59e9d800bc Mon Sep 17 00:00:00 2001
From: Tim Allclair
Date: Mon, 10 Mar 2025 10:01:23 -0700
Subject: [PATCH] Rename some allocation.Manager methods

---
 pkg/kubelet/allocation/allocation_manager.go   | 14 +++++++-------
 pkg/kubelet/kubelet.go                         |  6 +++---
 pkg/kubelet/kubelet_pods_test.go               |  2 +-
 pkg/kubelet/kubelet_test.go                    | 12 ++++++------
 .../kuberuntime/kuberuntime_manager_test.go    |  2 +-
 5 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/pkg/kubelet/allocation/allocation_manager.go b/pkg/kubelet/allocation/allocation_manager.go
index e4b17627ca9..576612c52a1 100644
--- a/pkg/kubelet/allocation/allocation_manager.go
+++ b/pkg/kubelet/allocation/allocation_manager.go
@@ -46,8 +46,8 @@ type Manager interface {
     // Returns the updated (or original) pod, and whether there was an allocation stored.
     UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool)

-    // SetPodAllocation checkpoints the resources allocated to a pod's containers.
-    SetPodAllocation(allocatedPod *v1.Pod) error
+    // SetAllocatedResources checkpoints the resources allocated to a pod's containers.
+    SetAllocatedResources(allocatedPod *v1.Pod) error

     // SetActuatedResources records the actuated resources of the given container (or the entire
     // pod, if actuatedContainer is nil).
@@ -56,8 +56,8 @@ type Manager interface {
     // GetActuatedResources returns the stored actuated resources for the container, and whether they exist.
     GetActuatedResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)

-    // DeletePod removes any stored state for the given pod UID.
-    DeletePod(uid types.UID)
+    // RemovePod removes any stored state for the given pod UID.
+    RemovePod(uid types.UID)

     // RemoveOrphanedPods removes the stored state for any pods not included in the set of remaining pods.
     RemoveOrphanedPods(remainingPods sets.Set[types.UID])
@@ -151,8 +151,8 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*
     return pod, updated
 }

-// SetPodAllocation checkpoints the resources allocated to a pod's containers
-func (m *manager) SetPodAllocation(pod *v1.Pod) error {
+// SetAllocatedResources checkpoints the resources allocated to a pod's containers
+func (m *manager) SetAllocatedResources(pod *v1.Pod) error {
     return m.allocated.SetPodResourceAllocation(pod.UID, allocationFromPod(pod))
 }

@@ -175,7 +175,7 @@ func allocationFromPod(pod *v1.Pod) map[string]v1.ResourceRequirements {
     return podAlloc
 }

-func (m *manager) DeletePod(uid types.UID) {
+func (m *manager) RemovePod(uid types.UID) {
     if err := m.allocated.Delete(uid, ""); err != nil {
         // If the deletion fails, it will be retried by RemoveOrphanedPods, so we can safely ignore the error.
         klog.V(3).ErrorS(err, "Failed to delete pod allocation", "podUID", uid)
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 3004a8d03e6..a26e23b801e 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -2667,7 +2667,7 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
             continue
         }
         // For new pod, checkpoint the resource values at which the Pod has been admitted
-        if err := kl.allocationManager.SetPodAllocation(allocatedPod); err != nil {
+        if err := kl.allocationManager.SetAllocatedResources(allocatedPod); err != nil {
             //TODO(vinaykul,InPlacePodVerticalScaling): Can we recover from this in some way? Investigate
             klog.ErrorS(err, "SetPodAllocation failed", "pod", klog.KObj(pod))
         }
@@ -2723,7 +2723,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
     start := kl.clock.Now()
     for _, pod := range pods {
         kl.podManager.RemovePod(pod)
-        kl.allocationManager.DeletePod(pod.UID)
+        kl.allocationManager.RemovePod(pod.UID)

         pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
         if wasMirror {
@@ -2909,7 +2909,7 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
     fit, resizeStatus, resizeMsg := kl.canResizePod(pod)
     if fit {
         // Update pod resource allocation checkpoint
-        if err := kl.allocationManager.SetPodAllocation(pod); err != nil {
+        if err := kl.allocationManager.SetAllocatedResources(pod); err != nil {
             return nil, err
         }
         for i, container := range pod.Spec.Containers {
diff --git a/pkg/kubelet/kubelet_pods_test.go b/pkg/kubelet/kubelet_pods_test.go
index e4d3d20093f..c48894cf82a 100644
--- a/pkg/kubelet/kubelet_pods_test.go
+++ b/pkg/kubelet/kubelet_pods_test.go
@@ -5093,7 +5093,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
             } else {
                 tPod.Spec.Containers[0].Resources = tc.Resources
             }
-            err := kubelet.allocationManager.SetPodAllocation(tPod)
+            err := kubelet.allocationManager.SetAllocatedResources(tPod)
             require.NoError(t, err)
             resources := tc.ActualResources
             if resources == nil {
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 1d1ec052027..da00c6b3e9d 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -2568,7 +2568,7 @@ func TestPodResourceAllocationReset(t *testing.T) {
         t.Run(tc.name, func(t *testing.T) {
             if tc.existingPodAllocation != nil {
                 // when kubelet restarts, AllocatedResources has already existed before adding pod
-                err := kubelet.allocationManager.SetPodAllocation(tc.existingPodAllocation)
+                err := kubelet.allocationManager.SetAllocatedResources(tc.existingPodAllocation)
                 if err != nil {
                     t.Fatalf("failed to set pod allocation: %v", err)
                 }
@@ -2858,12 +2858,12 @@ func TestHandlePodResourcesResize(t *testing.T) {
             }

             if !tt.newResourcesAllocated {
-                require.NoError(t, kubelet.allocationManager.SetPodAllocation(originalPod))
+                require.NoError(t, kubelet.allocationManager.SetAllocatedResources(originalPod))
             } else {
-                require.NoError(t, kubelet.allocationManager.SetPodAllocation(newPod))
+                require.NoError(t, kubelet.allocationManager.SetAllocatedResources(newPod))
             }
             require.NoError(t, kubelet.allocationManager.SetActuatedResources(originalPod, nil))
-            t.Cleanup(func() { kubelet.allocationManager.DeletePod(originalPod.UID) })
+            t.Cleanup(func() { kubelet.allocationManager.RemovePod(originalPod.UID) })

             podStatus := &kubecontainer.PodStatus{
                 ID: originalPod.UID,
@@ -3882,7 +3882,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
             UID: "12345",
         },
     }
-    t.Cleanup(func() { am.DeletePod(pod.UID) })
+    t.Cleanup(func() { am.RemovePod(pod.UID) })
     podStatus := &kubecontainer.PodStatus{
         ID:   pod.UID,
         Name: pod.Name,
@@ -3923,7 +3923,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
                     require.False(t, found)
                 }
             }
-            require.NoError(t, am.SetPodAllocation(pod))
+            require.NoError(t, am.SetAllocatedResources(pod))

             hasResizedResources := kl.isPodResizeInProgress(pod, podStatus)
             require.Equal(t, test.expectHasResize, hasResizedResources, "hasResizedResources")
diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
index 74b28daf061..c36a0e78f68 100644
--- a/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
+++ b/pkg/kubelet/kuberuntime/kuberuntime_manager_test.go
@@ -2898,7 +2898,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
             if test.setupFn != nil {
                 test.setupFn(pod)
             }
-            t.Cleanup(func() { m.allocationManager.DeletePod(pod.UID) })
+            t.Cleanup(func() { m.allocationManager.RemovePod(pod.UID) })
             for idx := range pod.Spec.Containers {
                 // compute hash
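Note (not part of the patch): below is a minimal, self-contained Go sketch of how call sites change under this rename. The UID, Pod, and map-backed manager types are simplified stand-ins invented for illustration; the real allocation.Manager in pkg/kubelet/allocation persists allocations through a checkpoint store rather than a plain map.

// Illustrative sketch only: toy stand-ins for the renamed allocation.Manager
// methods, showing the old -> new call mapping from this patch.
package main

import "fmt"

// UID and ResourceList stand in for types.UID and v1.ResourceRequirements.
type UID string
type ResourceList map[string]string

// Pod is a minimal stand-in for *v1.Pod.
type Pod struct {
    UID        UID
    Containers map[string]ResourceList
}

// manager is a toy in-memory implementation; the real one checkpoints to disk.
type manager struct {
    allocated map[UID]map[string]ResourceList
}

// SetAllocatedResources (formerly SetPodAllocation) records the resources
// allocated to a pod's containers.
func (m *manager) SetAllocatedResources(pod *Pod) error {
    m.allocated[pod.UID] = pod.Containers
    return nil
}

// RemovePod (formerly DeletePod) drops any stored state for the pod UID,
// matching the naming of podManager.RemovePod at the HandlePodRemoves call site.
func (m *manager) RemovePod(uid UID) {
    delete(m.allocated, uid)
}

func main() {
    m := &manager{allocated: make(map[UID]map[string]ResourceList)}
    pod := &Pod{
        UID:        "12345",
        Containers: map[string]ResourceList{"c1": {"cpu": "100m", "memory": "128Mi"}},
    }

    // was: m.SetPodAllocation(pod)
    if err := m.SetAllocatedResources(pod); err != nil {
        fmt.Println("checkpoint failed:", err)
    }

    // was: m.DeletePod(pod.UID)
    m.RemovePod(pod.UID)
    fmt.Println("entries remaining:", len(m.allocated))
}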