Rename some allocation.Manager methods
parent d4444dd598
commit 6d0b6278cd

This commit renames two allocation.Manager methods for consistency with the rest of the interface: SetPodAllocation becomes SetAllocatedResources (matching SetActuatedResources and GetActuatedResources), and DeletePod becomes RemovePod (matching RemoveOrphanedPods). All call sites in the kubelet and its tests are updated accordingly.
@@ -46,8 +46,8 @@ type Manager interface {
 	// Returns the updated (or original) pod, and whether there was an allocation stored.
 	UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool)
 
-	// SetPodAllocation checkpoints the resources allocated to a pod's containers.
-	SetPodAllocation(allocatedPod *v1.Pod) error
+	// SetAllocatedResources checkpoints the resources allocated to a pod's containers.
+	SetAllocatedResources(allocatedPod *v1.Pod) error
 
 	// SetActuatedResources records the actuated resources of the given container (or the entire
 	// pod, if actuatedContainer is nil).
@@ -56,8 +56,8 @@ type Manager interface {
 	// GetActuatedResources returns the stored actuated resources for the container, and whether they exist.
 	GetActuatedResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)
 
-	// DeletePod removes any stored state for the given pod UID.
-	DeletePod(uid types.UID)
+	// RemovePod removes any stored state for the given pod UID.
+	RemovePod(uid types.UID)
 
 	// RemoveOrphanedPods removes the stored state for any pods not included in the set of remaining pods.
 	RemoveOrphanedPods(remainingPods sets.Set[types.UID])
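To make the renamed surface concrete, here is a minimal in-memory sketch of the three lifecycle methods. It is illustrative only: the fakeManager type and its map-backed state are assumptions, and the real manager checkpoints to disk rather than to a map.

package allocation

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/sets"
)

// fakeManager is a toy, map-backed stand-in for the checkpointing manager.
type fakeManager struct {
	allocated map[types.UID]map[string]v1.ResourceRequirements
}

// SetAllocatedResources records the resources of every container in the pod,
// mirroring the renamed interface method above.
func (m *fakeManager) SetAllocatedResources(allocatedPod *v1.Pod) error {
	alloc := make(map[string]v1.ResourceRequirements, len(allocatedPod.Spec.Containers))
	for _, c := range allocatedPod.Spec.Containers {
		alloc[c.Name] = *c.Resources.DeepCopy()
	}
	m.allocated[allocatedPod.UID] = alloc
	return nil
}

// RemovePod drops any stored state for the pod UID; an unknown UID is a no-op.
func (m *fakeManager) RemovePod(uid types.UID) {
	delete(m.allocated, uid)
}

// RemoveOrphanedPods keeps only the state belonging to the remaining pods.
func (m *fakeManager) RemoveOrphanedPods(remainingPods sets.Set[types.UID]) {
	for uid := range m.allocated {
		if !remainingPods.Has(uid) {
			delete(m.allocated, uid)
		}
	}
}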
@@ -151,8 +151,8 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*
 	return pod, updated
 }
 
-// SetPodAllocation checkpoints the resources allocated to a pod's containers
-func (m *manager) SetPodAllocation(pod *v1.Pod) error {
+// SetAllocatedResources checkpoints the resources allocated to a pod's containers
+func (m *manager) SetAllocatedResources(pod *v1.Pod) error {
 	return m.allocated.SetPodResourceAllocation(pod.UID, allocationFromPod(pod))
 }
@@ -175,7 +175,7 @@ func allocationFromPod(pod *v1.Pod) map[string]v1.ResourceRequirements {
 	return podAlloc
 }
 
-func (m *manager) DeletePod(uid types.UID) {
+func (m *manager) RemovePod(uid types.UID) {
 	if err := m.allocated.Delete(uid, ""); err != nil {
 		// If the deletion fails, it will be retried by RemoveOrphanedPods, so we can safely ignore the error.
 		klog.V(3).ErrorS(err, "Failed to delete pod allocation", "podUID", uid)
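Note why the delete error is only logged here: RemoveOrphanedPods acts as a retry sweep, so a transient checkpoint failure is harmless. A minimal, self-contained sketch of that log-and-continue pattern follows; the store, removePod, and sweepOrphans names are hypothetical, not kubelet APIs.

package main

import "fmt"

// store is a hypothetical checkpoint store whose Delete can fail transiently.
type store struct{ data map[string]string }

func (s *store) Delete(key string) error {
	if s.data == nil {
		return fmt.Errorf("checkpoint store not initialized")
	}
	delete(s.data, key)
	return nil
}

// removePod mirrors the manager's log-and-continue approach: a failed delete
// is only logged, because the orphan sweep below will retry it.
func removePod(s *store, uid string) {
	if err := s.Delete(uid); err != nil {
		fmt.Printf("failed to delete allocation for %s (orphan sweep will retry): %v\n", uid, err)
	}
}

// sweepOrphans drops every stored pod absent from the remaining set,
// playing the role RemoveOrphanedPods plays in the real manager.
func sweepOrphans(s *store, remaining map[string]bool) {
	for uid := range s.data {
		if !remaining[uid] {
			delete(s.data, uid)
		}
	}
}

func main() {
	s := &store{data: map[string]string{"pod-a": "alloc", "pod-b": "alloc"}}
	removePod(s, "pod-a")
	sweepOrphans(s, map[string]bool{"pod-b": true})
	fmt.Println(s.data) // map[pod-b:alloc]
}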
@@ -2667,7 +2667,7 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
 			continue
 		}
 		// For new pod, checkpoint the resource values at which the Pod has been admitted
-		if err := kl.allocationManager.SetPodAllocation(allocatedPod); err != nil {
+		if err := kl.allocationManager.SetAllocatedResources(allocatedPod); err != nil {
 			//TODO(vinaykul,InPlacePodVerticalScaling): Can we recover from this in some way? Investigate
 			klog.ErrorS(err, "SetPodAllocation failed", "pod", klog.KObj(pod))
 		}
@@ -2723,7 +2723,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
 	start := kl.clock.Now()
 	for _, pod := range pods {
 		kl.podManager.RemovePod(pod)
-		kl.allocationManager.DeletePod(pod.UID)
+		kl.allocationManager.RemovePod(pod.UID)
 
 		pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
 		if wasMirror {
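After the rename, teardown reads uniformly: each per-pod manager is asked to RemovePod. The sketch below unifies the managers behind one interface for illustration only; in reality their signatures differ (podManager.RemovePod takes the pod object, allocationManager.RemovePod takes the UID), and the remover and teardown names are hypothetical.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

// remover captures the shape the per-pod managers share after the rename:
// each can drop its state for a pod.
type remover interface {
	RemovePod(uid types.UID)
}

// teardown sketches the HandlePodRemoves loop: every manager is asked, in
// order, to forget the pod.
func teardown(uid types.UID, managers ...remover) {
	for _, m := range managers {
		m.RemovePod(uid)
	}
}

type printRemover struct{ name string }

func (p printRemover) RemovePod(uid types.UID) { fmt.Println(p.name, "removed", uid) }

func main() {
	teardown("pod-uid-1", printRemover{"podManager"}, printRemover{"allocationManager"})
}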
@@ -2909,7 +2909,7 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
 	fit, resizeStatus, resizeMsg := kl.canResizePod(pod)
 	if fit {
 		// Update pod resource allocation checkpoint
-		if err := kl.allocationManager.SetPodAllocation(pod); err != nil {
+		if err := kl.allocationManager.SetAllocatedResources(pod); err != nil {
 			return nil, err
 		}
 		for i, container := range pod.Spec.Containers {
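The resize path above preserves a strict ordering: admit the resize, checkpoint the new allocation, then walk the containers. The following sketch shows that ordering in isolation; canResize, checkpoint, and actuate are hypothetical stand-ins, not kubelet APIs.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// canResize, checkpoint, and actuate are stand-ins for kl.canResizePod,
// allocationManager.SetAllocatedResources, and the per-container work done
// after the checkpoint in the hunk above.
func canResize(pod *v1.Pod) bool   { return true }
func checkpoint(pod *v1.Pod) error { return nil }
func actuate(c *v1.Container)      { fmt.Println("actuating", c.Name) }

// resizePod sketches the ordering: only an admitted resize is checkpointed,
// and only a checkpointed resize is actuated, so a kubelet restart never
// observes actuated-but-unrecorded resources.
func resizePod(pod *v1.Pod) error {
	if !canResize(pod) {
		return nil // keep the previous checkpoint; the resize stays pending
	}
	if err := checkpoint(pod); err != nil {
		return err
	}
	for i := range pod.Spec.Containers {
		actuate(&pod.Spec.Containers[i])
	}
	return nil
}

func main() {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", UID: "demo-uid"},
		Spec:       v1.PodSpec{Containers: []v1.Container{{Name: "app"}}},
	}
	if err := resizePod(pod); err != nil {
		fmt.Println("resize failed:", err)
	}
}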
@@ -5093,7 +5093,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 		} else {
 			tPod.Spec.Containers[0].Resources = tc.Resources
 		}
-		err := kubelet.allocationManager.SetPodAllocation(tPod)
+		err := kubelet.allocationManager.SetAllocatedResources(tPod)
 		require.NoError(t, err)
 		resources := tc.ActualResources
 		if resources == nil {
@@ -2568,7 +2568,7 @@ func TestPodResourceAllocationReset(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			if tc.existingPodAllocation != nil {
 				// when kubelet restarts, AllocatedResources has already existed before adding pod
-				err := kubelet.allocationManager.SetPodAllocation(tc.existingPodAllocation)
+				err := kubelet.allocationManager.SetAllocatedResources(tc.existingPodAllocation)
 				if err != nil {
 					t.Fatalf("failed to set pod allocation: %v", err)
 				}
@@ -2858,12 +2858,12 @@ func TestHandlePodResourcesResize(t *testing.T) {
 			}
 
 			if !tt.newResourcesAllocated {
-				require.NoError(t, kubelet.allocationManager.SetPodAllocation(originalPod))
+				require.NoError(t, kubelet.allocationManager.SetAllocatedResources(originalPod))
 			} else {
-				require.NoError(t, kubelet.allocationManager.SetPodAllocation(newPod))
+				require.NoError(t, kubelet.allocationManager.SetAllocatedResources(newPod))
 			}
 			require.NoError(t, kubelet.allocationManager.SetActuatedResources(originalPod, nil))
-			t.Cleanup(func() { kubelet.allocationManager.DeletePod(originalPod.UID) })
+			t.Cleanup(func() { kubelet.allocationManager.RemovePod(originalPod.UID) })
 
 			podStatus := &kubecontainer.PodStatus{
 				ID: originalPod.UID,
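The test changes all follow one pattern: seed state with SetAllocatedResources and pair it with a t.Cleanup that calls RemovePod, so no allocation leaks across cases. A sketch of that pattern as a helper; the seeder interface and seedAllocation function are hypothetical, not part of the kubelet test suite.

package allocation_test

import (
	"testing"

	"github.com/stretchr/testify/require"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// seeder is a hypothetical narrow view of allocation.Manager covering only
// what this helper needs.
type seeder interface {
	SetAllocatedResources(pod *v1.Pod) error
	RemovePod(uid types.UID)
}

// seedAllocation checkpoints the pod and registers a cleanup that removes it,
// so allocations never leak between test cases.
func seedAllocation(t *testing.T, am seeder, pod *v1.Pod) {
	t.Helper()
	require.NoError(t, am.SetAllocatedResources(pod))
	t.Cleanup(func() { am.RemovePod(pod.UID) })
}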
@@ -3882,7 +3882,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
 			UID:  "12345",
 		},
 	}
-	t.Cleanup(func() { am.DeletePod(pod.UID) })
+	t.Cleanup(func() { am.RemovePod(pod.UID) })
 	podStatus := &kubecontainer.PodStatus{
 		ID:   pod.UID,
 		Name: pod.Name,
@@ -3923,7 +3923,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
 				require.False(t, found)
 			}
 		}
-		require.NoError(t, am.SetPodAllocation(pod))
+		require.NoError(t, am.SetAllocatedResources(pod))
 
 		hasResizedResources := kl.isPodResizeInProgress(pod, podStatus)
 		require.Equal(t, test.expectHasResize, hasResizedResources, "hasResizedResources")
@@ -2898,7 +2898,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
 		if test.setupFn != nil {
 			test.setupFn(pod)
 		}
-		t.Cleanup(func() { m.allocationManager.DeletePod(pod.UID) })
+		t.Cleanup(func() { m.allocationManager.RemovePod(pod.UID) })
 
 		for idx := range pod.Spec.Containers {
 			// compute hash