Rename some allocation.Manager methods
This commit is contained in:
parent: d4444dd598
commit: 6d0b6278cd
@@ -46,8 +46,8 @@ type Manager interface {
     // Returns the updated (or original) pod, and whether there was an allocation stored.
     UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool)
 
-    // SetPodAllocation checkpoints the resources allocated to a pod's containers.
-    SetPodAllocation(allocatedPod *v1.Pod) error
+    // SetAllocatedResources checkpoints the resources allocated to a pod's containers.
+    SetAllocatedResources(allocatedPod *v1.Pod) error
 
     // SetActuatedResources records the actuated resources of the given container (or the entire
     // pod, if actuatedContainer is nil).
@@ -56,8 +56,8 @@ type Manager interface {
     // GetActuatedResources returns the stored actuated resources for the container, and whether they exist.
     GetActuatedResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)
 
-    // DeletePod removes any stored state for the given pod UID.
-    DeletePod(uid types.UID)
+    // RemovePod removes any stored state for the given pod UID.
+    RemovePod(uid types.UID)
 
     // RemoveOrphanedPods removes the stored state for any pods not included in the set of remaining pods.
     RemoveOrphanedPods(remainingPods sets.Set[types.UID])
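For orientation, here is a minimal sketch of how a caller would exercise the renamed methods; it is not part of the commit. Only the method names and signatures come from the Manager interface above. The package import path (assumed to be k8s.io/kubernetes/pkg/kubelet/allocation), the checkpointAndCleanUp helper, and the pod/UID arguments are hypothetical, added purely for illustration.

package example

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/sets"

    // Assumed import path for the kubelet allocation manager package.
    "k8s.io/kubernetes/pkg/kubelet/allocation"
)

// checkpointAndCleanUp is a hypothetical helper showing the renamed calls:
// SetAllocatedResources (was SetPodAllocation) and RemovePod (was DeletePod).
func checkpointAndCleanUp(am allocation.Manager, admittedPod *v1.Pod, deletedUID types.UID) error {
    // Checkpoint the resources the pod was admitted with.
    if err := am.SetAllocatedResources(admittedPod); err != nil {
        return err
    }

    // Read the pod back with any stored allocation applied.
    updatedPod, hadAllocation := am.UpdatePodFromAllocation(admittedPod)
    _, _ = updatedPod, hadAllocation

    // Drop state for a removed pod, then prune state for anything not still running.
    am.RemovePod(deletedUID)
    am.RemoveOrphanedPods(sets.New(admittedPod.UID))
    return nil
}

The new names also read consistently with the rest of the diff: RemovePod matches kl.podManager.RemovePod, and SetAllocatedResources mirrors the existing SetActuatedResources/GetActuatedResources pair.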
@@ -151,8 +151,8 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*
     return pod, updated
 }
 
-// SetPodAllocation checkpoints the resources allocated to a pod's containers
-func (m *manager) SetPodAllocation(pod *v1.Pod) error {
+// SetAllocatedResources checkpoints the resources allocated to a pod's containers
+func (m *manager) SetAllocatedResources(pod *v1.Pod) error {
     return m.allocated.SetPodResourceAllocation(pod.UID, allocationFromPod(pod))
 }
 
@@ -175,7 +175,7 @@ func allocationFromPod(pod *v1.Pod) map[string]v1.ResourceRequirements {
     return podAlloc
 }
 
-func (m *manager) DeletePod(uid types.UID) {
+func (m *manager) RemovePod(uid types.UID) {
     if err := m.allocated.Delete(uid, ""); err != nil {
         // If the deletion fails, it will be retried by RemoveOrphanedPods, so we can safely ignore the error.
         klog.V(3).ErrorS(err, "Failed to delete pod allocation", "podUID", uid)
@@ -2667,7 +2667,7 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
             continue
         }
         // For new pod, checkpoint the resource values at which the Pod has been admitted
-        if err := kl.allocationManager.SetPodAllocation(allocatedPod); err != nil {
+        if err := kl.allocationManager.SetAllocatedResources(allocatedPod); err != nil {
             //TODO(vinaykul,InPlacePodVerticalScaling): Can we recover from this in some way? Investigate
             klog.ErrorS(err, "SetPodAllocation failed", "pod", klog.KObj(pod))
         }
@@ -2723,7 +2723,7 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
     start := kl.clock.Now()
     for _, pod := range pods {
         kl.podManager.RemovePod(pod)
-        kl.allocationManager.DeletePod(pod.UID)
+        kl.allocationManager.RemovePod(pod.UID)
 
         pod, mirrorPod, wasMirror := kl.podManager.GetPodAndMirrorPod(pod)
         if wasMirror {
@@ -2909,7 +2909,7 @@ func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod, podStatus *kubecontaine
     fit, resizeStatus, resizeMsg := kl.canResizePod(pod)
     if fit {
         // Update pod resource allocation checkpoint
-        if err := kl.allocationManager.SetPodAllocation(pod); err != nil {
+        if err := kl.allocationManager.SetAllocatedResources(pod); err != nil {
             return nil, err
         }
         for i, container := range pod.Spec.Containers {
@@ -5093,7 +5093,7 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
         } else {
             tPod.Spec.Containers[0].Resources = tc.Resources
         }
-        err := kubelet.allocationManager.SetPodAllocation(tPod)
+        err := kubelet.allocationManager.SetAllocatedResources(tPod)
         require.NoError(t, err)
         resources := tc.ActualResources
         if resources == nil {
@@ -2568,7 +2568,7 @@ func TestPodResourceAllocationReset(t *testing.T) {
        t.Run(tc.name, func(t *testing.T) {
            if tc.existingPodAllocation != nil {
                // when kubelet restarts, AllocatedResources has already existed before adding pod
-               err := kubelet.allocationManager.SetPodAllocation(tc.existingPodAllocation)
+               err := kubelet.allocationManager.SetAllocatedResources(tc.existingPodAllocation)
                if err != nil {
                    t.Fatalf("failed to set pod allocation: %v", err)
                }
@@ -2858,12 +2858,12 @@ func TestHandlePodResourcesResize(t *testing.T) {
            }
 
            if !tt.newResourcesAllocated {
-               require.NoError(t, kubelet.allocationManager.SetPodAllocation(originalPod))
+               require.NoError(t, kubelet.allocationManager.SetAllocatedResources(originalPod))
            } else {
-               require.NoError(t, kubelet.allocationManager.SetPodAllocation(newPod))
+               require.NoError(t, kubelet.allocationManager.SetAllocatedResources(newPod))
            }
            require.NoError(t, kubelet.allocationManager.SetActuatedResources(originalPod, nil))
-           t.Cleanup(func() { kubelet.allocationManager.DeletePod(originalPod.UID) })
+           t.Cleanup(func() { kubelet.allocationManager.RemovePod(originalPod.UID) })
 
            podStatus := &kubecontainer.PodStatus{
                ID: originalPod.UID,
@@ -3882,7 +3882,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
            UID: "12345",
        },
    }
-   t.Cleanup(func() { am.DeletePod(pod.UID) })
+   t.Cleanup(func() { am.RemovePod(pod.UID) })
    podStatus := &kubecontainer.PodStatus{
        ID: pod.UID,
        Name: pod.Name,
@@ -3923,7 +3923,7 @@ func TestIsPodResizeInProgress(t *testing.T) {
                require.False(t, found)
            }
        }
-       require.NoError(t, am.SetPodAllocation(pod))
+       require.NoError(t, am.SetAllocatedResources(pod))
 
        hasResizedResources := kl.isPodResizeInProgress(pod, podStatus)
        require.Equal(t, test.expectHasResize, hasResizedResources, "hasResizedResources")
@@ -2898,7 +2898,7 @@ func TestComputePodActionsForPodResize(t *testing.T) {
            if test.setupFn != nil {
                test.setupFn(pod)
            }
-           t.Cleanup(func() { m.allocationManager.DeletePod(pod.UID) })
+           t.Cleanup(func() { m.allocationManager.RemovePod(pod.UID) })
 
            for idx := range pod.Spec.Containers {
                // compute hash