Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 20:53:33 +00:00
Replace PodResourceAllocation with PodResourceInfoMap type for extensibility for pod-level resources support

parent 473ec01548
commit 091b450057
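In short, the kubelet's per-pod allocation state moves from a bare nested map to a struct-valued map, so pod-scoped fields can be added later without another round of signature churn. A minimal sketch of the shape change, using the type definitions from the diff below (the commented-out pod-level field is a hypothetical illustration of the intended extensibility, not part of this commit):

    // Old shape: pod UID -> container name -> requirements.
    // type PodResourceAllocation map[types.UID]map[string]v1.ResourceRequirements

    // New shape: pod UID -> PodResourceInfo, which wraps the per-container map.
    type PodResourceInfo struct {
        ContainerResources map[string]v1.ResourceRequirements
        // Hypothetical future pod-level field (not in this commit), e.g.:
        // PodResources *v1.ResourceRequirements
    }

    type PodResourceInfoMap map[types.UID]PodResourceInfo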
@@ -103,18 +103,18 @@ func NewInMemoryManager() Manager {
 // GetContainerResourceAllocation returns the last checkpointed AllocatedResources values
 // If checkpoint manager has not been initialized, it returns nil, false
 func (m *manager) GetContainerResourceAllocation(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
-    return m.allocated.GetContainerResourceAllocation(podUID, containerName)
+    return m.allocated.GetContainerResources(podUID, containerName)
 }

 // UpdatePodFromAllocation overwrites the pod spec with the allocation.
 // This function does a deep copy only if updates are needed.
 func (m *manager) UpdatePodFromAllocation(pod *v1.Pod) (*v1.Pod, bool) {
     // TODO(tallclair): This clones the whole cache, but we only need 1 pod.
-    allocs := m.allocated.GetPodResourceAllocation()
+    allocs := m.allocated.GetPodResourceInfoMap()
     return updatePodFromAllocation(pod, allocs)
 }

-func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*v1.Pod, bool) {
+func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceInfoMap) (*v1.Pod, bool) {
     allocated, found := allocs[pod.UID]
     if !found {
         return pod, false
@@ -122,7 +122,7 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*

     updated := false
     containerAlloc := func(c v1.Container) (v1.ResourceRequirements, bool) {
-        if cAlloc, ok := allocated[c.Name]; ok {
+        if cAlloc, ok := allocated.ContainerResources[c.Name]; ok {
             if !apiequality.Semantic.DeepEqual(c.Resources, cAlloc) {
                 // Allocation differs from pod spec, retrieve the allocation
                 if !updated {
@@ -153,21 +153,22 @@ func updatePodFromAllocation(pod *v1.Pod, allocs state.PodResourceAllocation) (*

 // SetAllocatedResources checkpoints the resources allocated to a pod's containers
 func (m *manager) SetAllocatedResources(pod *v1.Pod) error {
-    return m.allocated.SetPodResourceAllocation(pod.UID, allocationFromPod(pod))
+    return m.allocated.SetPodResourceInfo(pod.UID, allocationFromPod(pod))
 }

-func allocationFromPod(pod *v1.Pod) map[string]v1.ResourceRequirements {
-    podAlloc := make(map[string]v1.ResourceRequirements)
+func allocationFromPod(pod *v1.Pod) state.PodResourceInfo {
+    var podAlloc state.PodResourceInfo
+    podAlloc.ContainerResources = make(map[string]v1.ResourceRequirements)
     for _, container := range pod.Spec.Containers {
         alloc := *container.Resources.DeepCopy()
-        podAlloc[container.Name] = alloc
+        podAlloc.ContainerResources[container.Name] = alloc
     }

     if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
         for _, container := range pod.Spec.InitContainers {
             if podutil.IsRestartableInitContainer(&container) {
                 alloc := *container.Resources.DeepCopy()
-                podAlloc[container.Name] = alloc
+                podAlloc.ContainerResources[container.Name] = alloc
             }
         }
     }
@@ -195,12 +196,12 @@ func (m *manager) RemoveOrphanedPods(remainingPods sets.Set[types.UID]) {
 func (m *manager) SetActuatedResources(allocatedPod *v1.Pod, actuatedContainer *v1.Container) error {
     if actuatedContainer == nil {
         alloc := allocationFromPod(allocatedPod)
-        return m.actuated.SetPodResourceAllocation(allocatedPod.UID, alloc)
+        return m.actuated.SetPodResourceInfo(allocatedPod.UID, alloc)
     }

-    return m.actuated.SetContainerResourceAllocation(allocatedPod.UID, actuatedContainer.Name, actuatedContainer.Resources)
+    return m.actuated.SetContainerResources(allocatedPod.UID, actuatedContainer.Name, actuatedContainer.Resources)
 }

 func (m *manager) GetActuatedResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
-    return m.actuated.GetContainerResourceAllocation(podUID, containerName)
+    return m.actuated.GetContainerResources(podUID, containerName)
 }
@@ -103,44 +103,50 @@ func TestUpdatePodFromAllocation(t *testing.T) {
     tests := []struct {
         name         string
         pod          *v1.Pod
-        allocs       state.PodResourceAllocation
+        allocs       state.PodResourceInfoMap
         expectPod    *v1.Pod
         expectUpdate bool
     }{{
         name: "steady state",
         pod:  pod,
-        allocs: state.PodResourceAllocation{
-            pod.UID: map[string]v1.ResourceRequirements{
-                "c1":                  *pod.Spec.Containers[0].Resources.DeepCopy(),
-                "c2":                  *pod.Spec.Containers[1].Resources.DeepCopy(),
-                "c1-restartable-init": *pod.Spec.InitContainers[0].Resources.DeepCopy(),
-                "c1-init":             *pod.Spec.InitContainers[1].Resources.DeepCopy(),
+        allocs: state.PodResourceInfoMap{
+            pod.UID: state.PodResourceInfo{
+                ContainerResources: map[string]v1.ResourceRequirements{
+                    "c1":                  *pod.Spec.Containers[0].Resources.DeepCopy(),
+                    "c2":                  *pod.Spec.Containers[1].Resources.DeepCopy(),
+                    "c1-restartable-init": *pod.Spec.InitContainers[0].Resources.DeepCopy(),
+                    "c1-init":             *pod.Spec.InitContainers[1].Resources.DeepCopy(),
+                },
             },
         },
         expectUpdate: false,
     }, {
         name:         "no allocations",
         pod:          pod,
-        allocs:       state.PodResourceAllocation{},
+        allocs:       state.PodResourceInfoMap{},
         expectUpdate: false,
     }, {
         name: "missing container allocation",
         pod:  pod,
-        allocs: state.PodResourceAllocation{
-            pod.UID: map[string]v1.ResourceRequirements{
-                "c2": *pod.Spec.Containers[1].Resources.DeepCopy(),
+        allocs: state.PodResourceInfoMap{
+            pod.UID: state.PodResourceInfo{
+                ContainerResources: map[string]v1.ResourceRequirements{
+                    "c2": *pod.Spec.Containers[1].Resources.DeepCopy(),
+                },
             },
         },
         expectUpdate: false,
     }, {
         name: "resized container",
         pod:  pod,
-        allocs: state.PodResourceAllocation{
-            pod.UID: map[string]v1.ResourceRequirements{
-                "c1":                  *resizedPod.Spec.Containers[0].Resources.DeepCopy(),
-                "c2":                  *resizedPod.Spec.Containers[1].Resources.DeepCopy(),
-                "c1-restartable-init": *resizedPod.Spec.InitContainers[0].Resources.DeepCopy(),
-                "c1-init":             *resizedPod.Spec.InitContainers[1].Resources.DeepCopy(),
+        allocs: state.PodResourceInfoMap{
+            pod.UID: state.PodResourceInfo{
+                ContainerResources: map[string]v1.ResourceRequirements{
+                    "c1":                  *resizedPod.Spec.Containers[0].Resources.DeepCopy(),
+                    "c2":                  *resizedPod.Spec.Containers[1].Resources.DeepCopy(),
+                    "c1-restartable-init": *resizedPod.Spec.InitContainers[0].Resources.DeepCopy(),
+                    "c1-init":             *resizedPod.Spec.InitContainers[1].Resources.DeepCopy(),
+                },
             },
         },
         expectUpdate: true,
@@ -20,16 +20,14 @@ import (
     "encoding/json"
     "fmt"

-    v1 "k8s.io/api/core/v1"
-    "k8s.io/apimachinery/pkg/types"
     "k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
     "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
 )

 var _ checkpointmanager.Checkpoint = &Checkpoint{}

-type PodResourceAllocationInfo struct {
-    AllocationEntries map[types.UID]map[string]v1.ResourceRequirements `json:"allocationEntries,omitempty"`
+type PodResourceCheckpointInfo struct {
+    Entries PodResourceInfoMap `json:"entries,omitempty"`
 }

 // Checkpoint represents a structure to store pod resource allocation checkpoint data
@@ -41,7 +39,7 @@ type Checkpoint struct {
 }

 // NewCheckpoint creates a new checkpoint from a list of claim info states
-func NewCheckpoint(allocations *PodResourceAllocationInfo) (*Checkpoint, error) {
+func NewCheckpoint(allocations *PodResourceCheckpointInfo) (*Checkpoint, error) {

     serializedAllocations, err := json.Marshal(allocations)
     if err != nil {
@@ -70,9 +68,9 @@ func (cp *Checkpoint) VerifyChecksum() error {
     return cp.Checksum.Verify(cp.Data)
 }

-// GetPodResourceAllocationInfo returns Pod Resource Allocation info states from checkpoint
-func (cp *Checkpoint) GetPodResourceAllocationInfo() (*PodResourceAllocationInfo, error) {
-    var data PodResourceAllocationInfo
+// GetPodResourceCheckpointInfo returns Pod Resource Allocation info states from checkpoint
+func (cp *Checkpoint) GetPodResourceCheckpointInfo() (*PodResourceCheckpointInfo, error) {
+    var data PodResourceCheckpointInfo
     if err := json.Unmarshal([]byte(cp.Data), &data); err != nil {
         return nil, err
     }
@@ -22,36 +22,45 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
 )

-// PodResourceAllocation type is used in tracking resources allocated to pod's containers
-type PodResourceAllocation map[types.UID]map[string]v1.ResourceRequirements
-
-// Clone returns a copy of PodResourceAllocation
-func (pr PodResourceAllocation) Clone() PodResourceAllocation {
-    prCopy := make(PodResourceAllocation)
-    for pod := range pr {
-        prCopy[pod] = make(map[string]v1.ResourceRequirements)
-        for container, alloc := range pr[pod] {
-            prCopy[pod][container] = *alloc.DeepCopy()
+// PodResourceInfo stores resource requirements for containers within a pod.
+type PodResourceInfo struct {
+    // ContainerResources maps container names to their respective ResourceRequirements.
+    ContainerResources map[string]v1.ResourceRequirements
+}
+
+// PodResourceInfoMap maps pod UIDs to their corresponding PodResourceInfo,
+// tracking resource requirements for all containers within each pod.
+type PodResourceInfoMap map[types.UID]PodResourceInfo
+
+// Clone returns a copy of PodResourceInfoMap
+func (pr PodResourceInfoMap) Clone() PodResourceInfoMap {
+    prCopy := make(PodResourceInfoMap)
+    for podUID, podInfo := range pr {
+        prCopy[podUID] = PodResourceInfo{
+            ContainerResources: make(map[string]v1.ResourceRequirements),
+        }
+        for containerName, containerInfo := range podInfo.ContainerResources {
+            prCopy[podUID].ContainerResources[containerName] = *containerInfo.DeepCopy()
         }
     }
     return prCopy
 }

-// Reader interface used to read current pod resource allocation state
+// Reader interface used to read current pod resource state
 type Reader interface {
-    GetContainerResourceAllocation(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)
-    GetPodResourceAllocation() PodResourceAllocation
+    GetContainerResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool)
+    GetPodResourceInfoMap() PodResourceInfoMap
 }

 type writer interface {
-    SetContainerResourceAllocation(podUID types.UID, containerName string, alloc v1.ResourceRequirements) error
-    SetPodResourceAllocation(podUID types.UID, alloc map[string]v1.ResourceRequirements) error
+    SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error
+    SetPodResourceInfo(podUID types.UID, resourceInfo PodResourceInfo) error
     RemovePod(podUID types.UID) error
     // RemoveOrphanedPods removes the stored state for any pods not included in the set of remaining pods.
     RemoveOrphanedPods(remainingPods sets.Set[types.UID])
 }

-// State interface provides methods for tracking and setting pod resource allocation
+// State interface provides methods for tracking and setting pod resources
 type State interface {
     Reader
     writer
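Because the map value is now a struct, callers look up the pod first and then the container, instead of chained map indexing. A minimal sketch of that pattern, assuming the types above; requestsFor is a hypothetical helper for illustration, not part of the commit, and it mirrors what the in-memory store's GetContainerResources does later in this diff:

    func requestsFor(m PodResourceInfoMap, podUID types.UID, container string) (v1.ResourceRequirements, bool) {
        podInfo, ok := m[podUID]
        if !ok {
            return v1.ResourceRequirements{}, false
        }
        resources, ok := podInfo.ContainerResources[container]
        if !ok {
            return v1.ResourceRequirements{}, false
        }
        // Deep-copy so the caller cannot mutate the cached state.
        return *resources.DeepCopy(), true
    }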
@@ -40,17 +40,17 @@ type stateCheckpoint struct {
     lastChecksum      checksum.Checksum
 }

-// NewStateCheckpoint creates new State for keeping track of pod resource allocations with checkpoint backend
+// NewStateCheckpoint creates new State for keeping track of pod resource information with checkpoint backend
 func NewStateCheckpoint(stateDir, checkpointName string) (State, error) {
     checkpointManager, err := checkpointmanager.NewCheckpointManager(stateDir)
     if err != nil {
-        return nil, fmt.Errorf("failed to initialize checkpoint manager for pod allocation tracking: %v", err)
+        return nil, fmt.Errorf("failed to initialize checkpoint manager for pod resource information tracking: %w", err)
     }

     pra, checksum, err := restoreState(checkpointManager, checkpointName)
     if err != nil {
         //lint:ignore ST1005 user-facing error message
-        return nil, fmt.Errorf("could not restore state from checkpoint: %w, please drain this node and delete pod allocation checkpoint file %q before restarting Kubelet",
+        return nil, fmt.Errorf("could not restore state from checkpoint: %w, please drain this node and delete pod resource information checkpoint file %q before restarting Kubelet",
             err, path.Join(stateDir, checkpointName))
     }

@@ -64,7 +64,7 @@ func NewStateCheckpoint(stateDir, checkpointName string) (State, error) {
 }

 // restores state from a checkpoint and creates it if it doesn't exist
-func restoreState(checkpointManager checkpointmanager.CheckpointManager, checkpointName string) (PodResourceAllocation, checksum.Checksum, error) {
+func restoreState(checkpointManager checkpointmanager.CheckpointManager, checkpointName string) (PodResourceInfoMap, checksum.Checksum, error) {
     checkpoint := &Checkpoint{}
     if err := checkpointManager.GetCheckpoint(checkpointName, checkpoint); err != nil {
         if err == errors.ErrCheckpointNotFound {
@@ -73,21 +73,21 @@ func restoreState(checkpointManager checkpointmanager.CheckpointManager, checkpo
             return nil, 0, err
         }

-    praInfo, err := checkpoint.GetPodResourceAllocationInfo()
+    praInfo, err := checkpoint.GetPodResourceCheckpointInfo()
     if err != nil {
-        return nil, 0, fmt.Errorf("failed to get pod resource allocation info: %w", err)
+        return nil, 0, fmt.Errorf("failed to get pod resource information: %w", err)
     }

-    klog.V(2).InfoS("State checkpoint: restored pod resource allocation state from checkpoint")
-    return praInfo.AllocationEntries, checkpoint.Checksum, nil
+    klog.V(2).InfoS("State checkpoint: restored pod resource state from checkpoint")
+    return praInfo.Entries, checkpoint.Checksum, nil
 }

 // saves state to a checkpoint, caller is responsible for locking
 func (sc *stateCheckpoint) storeState() error {
-    podAllocation := sc.cache.GetPodResourceAllocation()
+    resourceInfo := sc.cache.GetPodResourceInfoMap()

-    checkpoint, err := NewCheckpoint(&PodResourceAllocationInfo{
-        AllocationEntries: podAllocation,
+    checkpoint, err := NewCheckpoint(&PodResourceCheckpointInfo{
+        Entries: resourceInfo,
     })
     if err != nil {
         return fmt.Errorf("failed to create checkpoint: %w", err)
@@ -98,47 +98,50 @@ func (sc *stateCheckpoint) storeState() error {
     }
     err = sc.checkpointManager.CreateCheckpoint(sc.checkpointName, checkpoint)
     if err != nil {
-        klog.ErrorS(err, "Failed to save pod allocation checkpoint")
+        klog.ErrorS(err, "Failed to save pod resource information checkpoint")
         return err
     }
     sc.lastChecksum = checkpoint.Checksum
     return nil
 }

-// GetContainerResourceAllocation returns current resources allocated to a pod's container
-func (sc *stateCheckpoint) GetContainerResourceAllocation(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
+// GetContainerResources returns current resources information to a pod's container
+func (sc *stateCheckpoint) GetContainerResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
     sc.mux.RLock()
     defer sc.mux.RUnlock()
-    return sc.cache.GetContainerResourceAllocation(podUID, containerName)
+    return sc.cache.GetContainerResources(podUID, containerName)
 }

-// GetPodResourceAllocation returns current pod resource allocation
-func (sc *stateCheckpoint) GetPodResourceAllocation() PodResourceAllocation {
+// GetPodResourceInfoMap returns current pod resource information
+func (sc *stateCheckpoint) GetPodResourceInfoMap() PodResourceInfoMap {
     sc.mux.RLock()
     defer sc.mux.RUnlock()
-    return sc.cache.GetPodResourceAllocation()
+    return sc.cache.GetPodResourceInfoMap()
 }

-// SetContainerResourceAllocation sets resources allocated to a pod's container
-func (sc *stateCheckpoint) SetContainerResourceAllocation(podUID types.UID, containerName string, alloc v1.ResourceRequirements) error {
+// SetContainerResoruces sets resources information for a pod's container
+func (sc *stateCheckpoint) SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error {
     sc.mux.Lock()
     defer sc.mux.Unlock()
-    sc.cache.SetContainerResourceAllocation(podUID, containerName, alloc)
-    return sc.storeState()
-}
-
-// SetPodResourceAllocation sets pod resource allocation
-func (sc *stateCheckpoint) SetPodResourceAllocation(podUID types.UID, alloc map[string]v1.ResourceRequirements) error {
-    sc.mux.Lock()
-    defer sc.mux.Unlock()
-    err := sc.cache.SetPodResourceAllocation(podUID, alloc)
+    err := sc.cache.SetContainerResources(podUID, containerName, resources)
     if err != nil {
         return err
     }
     return sc.storeState()
 }

-// Delete deletes allocations for specified pod
+// SetPodResourceInfo sets pod resource information
+func (sc *stateCheckpoint) SetPodResourceInfo(podUID types.UID, resourceInfo PodResourceInfo) error {
+    sc.mux.Lock()
+    defer sc.mux.Unlock()
+    err := sc.cache.SetPodResourceInfo(podUID, resourceInfo)
+    if err != nil {
+        return err
+    }
+    return sc.storeState()
+}
+
+// Delete deletes resource information for specified pod
 func (sc *stateCheckpoint) RemovePod(podUID types.UID) error {
     sc.mux.Lock()
     defer sc.mux.Unlock()
@@ -161,19 +164,19 @@ func NewNoopStateCheckpoint() State {
     return &noopStateCheckpoint{}
 }

-func (sc *noopStateCheckpoint) GetContainerResourceAllocation(_ types.UID, _ string) (v1.ResourceRequirements, bool) {
+func (sc *noopStateCheckpoint) GetContainerResources(_ types.UID, _ string) (v1.ResourceRequirements, bool) {
     return v1.ResourceRequirements{}, false
 }

-func (sc *noopStateCheckpoint) GetPodResourceAllocation() PodResourceAllocation {
+func (sc *noopStateCheckpoint) GetPodResourceInfoMap() PodResourceInfoMap {
     return nil
 }

-func (sc *noopStateCheckpoint) SetContainerResourceAllocation(_ types.UID, _ string, _ v1.ResourceRequirements) error {
+func (sc *noopStateCheckpoint) SetContainerResources(_ types.UID, _ string, _ v1.ResourceRequirements) error {
     return nil
 }

-func (sc *noopStateCheckpoint) SetPodResourceAllocation(_ types.UID, _ map[string]v1.ResourceRequirements) error {
+func (sc *noopStateCheckpoint) SetPodResourceInfo(_ types.UID, _ PodResourceInfo) error {
     return nil
 }

@ -33,7 +33,7 @@ const testCheckpoint = "pod_status_manager_state"
|
|||||||
|
|
||||||
func newTestStateCheckpoint(t *testing.T) *stateCheckpoint {
|
func newTestStateCheckpoint(t *testing.T) *stateCheckpoint {
|
||||||
testingDir := getTestDir(t)
|
testingDir := getTestDir(t)
|
||||||
cache := NewStateMemory(PodResourceAllocation{})
|
cache := NewStateMemory(PodResourceInfoMap{})
|
||||||
checkpointManager, err := checkpointmanager.NewCheckpointManager(testingDir)
|
checkpointManager, err := checkpointmanager.NewCheckpointManager(testingDir)
|
||||||
require.NoError(t, err, "failed to create checkpoint manager")
|
require.NoError(t, err, "failed to create checkpoint manager")
|
||||||
checkpointName := "pod_state_checkpoint"
|
checkpointName := "pod_state_checkpoint"
|
||||||
@ -56,12 +56,12 @@ func getTestDir(t *testing.T) string {
|
|||||||
return testingDir
|
return testingDir
|
||||||
}
|
}
|
||||||
|
|
||||||
func verifyPodResourceAllocation(t *testing.T, expected, actual *PodResourceAllocation, msgAndArgs string) {
|
func verifyPodResourceAllocation(t *testing.T, expected, actual *PodResourceInfoMap, msgAndArgs string) {
|
||||||
for podUID, containerResourceList := range *expected {
|
for podUID, podResourceInfo := range *expected {
|
||||||
require.Equal(t, len(containerResourceList), len((*actual)[podUID]), msgAndArgs)
|
require.Equal(t, len(podResourceInfo.ContainerResources), len((*actual)[podUID].ContainerResources), msgAndArgs)
|
||||||
for containerName, resourceList := range containerResourceList {
|
for containerName, resourceList := range podResourceInfo.ContainerResources {
|
||||||
for name, quantity := range resourceList.Requests {
|
for name, quantity := range resourceList.Requests {
|
||||||
require.True(t, quantity.Equal((*actual)[podUID][containerName].Requests[name]), msgAndArgs)
|
require.True(t, quantity.Equal((*actual)[podUID].ContainerResources[containerName].Requests[name]), msgAndArgs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -69,7 +69,7 @@ func verifyPodResourceAllocation(t *testing.T, expected, actual *PodResourceAllo
|
|||||||
|
|
||||||
func Test_stateCheckpoint_storeState(t *testing.T) {
|
func Test_stateCheckpoint_storeState(t *testing.T) {
|
||||||
type args struct {
|
type args struct {
|
||||||
podResourceAllocation PodResourceAllocation
|
resInfoMap PodResourceInfoMap
|
||||||
}
|
}
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
@ -91,12 +91,14 @@ func Test_stateCheckpoint_storeState(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
name: fmt.Sprintf("resource - %s%s", fact, suf),
|
name: fmt.Sprintf("resource - %s%s", fact, suf),
|
||||||
args: args{
|
args: args{
|
||||||
podResourceAllocation: PodResourceAllocation{
|
resInfoMap: PodResourceInfoMap{
|
||||||
"pod1": {
|
"pod1": {
|
||||||
"container1": {
|
ContainerResources: map[string]v1.ResourceRequirements{
|
||||||
Requests: v1.ResourceList{
|
"container1": {
|
||||||
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%s%s", fact, suf)),
|
Requests: v1.ResourceList{
|
||||||
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%s%s", fact, suf)),
|
v1.ResourceCPU: resource.MustParse(fmt.Sprintf("%s%s", fact, suf)),
|
||||||
|
v1.ResourceMemory: resource.MustParse(fmt.Sprintf("%s%s", fact, suf)),
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -111,33 +113,35 @@ func Test_stateCheckpoint_storeState(t *testing.T) {
|
|||||||
originalSC, err := NewStateCheckpoint(testDir, testCheckpoint)
|
originalSC, err := NewStateCheckpoint(testDir, testCheckpoint)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
for podUID, alloc := range tt.args.podResourceAllocation {
|
for podUID, alloc := range tt.args.resInfoMap {
|
||||||
err = originalSC.SetPodResourceAllocation(podUID, alloc)
|
err = originalSC.SetPodResourceInfo(podUID, alloc)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
actual := originalSC.GetPodResourceAllocation()
|
actual := originalSC.GetPodResourceInfoMap()
|
||||||
verifyPodResourceAllocation(t, &tt.args.podResourceAllocation, &actual, "stored pod resource allocation is not equal to original pod resource allocation")
|
verifyPodResourceAllocation(t, &tt.args.resInfoMap, &actual, "stored pod resource allocation is not equal to original pod resource allocation")
|
||||||
|
|
||||||
newSC, err := NewStateCheckpoint(testDir, testCheckpoint)
|
newSC, err := NewStateCheckpoint(testDir, testCheckpoint)
|
||||||
require.NoError(t, err)
|
require.NoError(t, err)
|
||||||
|
|
||||||
actual = newSC.GetPodResourceAllocation()
|
actual = newSC.GetPodResourceInfoMap()
|
||||||
verifyPodResourceAllocation(t, &tt.args.podResourceAllocation, &actual, "restored pod resource allocation is not equal to original pod resource allocation")
|
verifyPodResourceAllocation(t, &tt.args.resInfoMap, &actual, "restored pod resource allocation is not equal to original pod resource allocation")
|
||||||
|
|
||||||
checkpointPath := filepath.Join(testDir, testCheckpoint)
|
checkpointPath := filepath.Join(testDir, testCheckpoint)
|
||||||
require.FileExists(t, checkpointPath)
|
require.FileExists(t, checkpointPath)
|
||||||
require.NoError(t, os.Remove(checkpointPath)) // Remove the checkpoint file to track whether it's re-written.
|
require.NoError(t, os.Remove(checkpointPath)) // Remove the checkpoint file to track whether it's re-written.
|
||||||
|
|
||||||
// Setting the pod allocations to the same values should not re-write the checkpoint.
|
// Setting the pod allocations to the same values should not re-write the checkpoint.
|
||||||
for podUID, alloc := range tt.args.podResourceAllocation {
|
for podUID, alloc := range tt.args.resInfoMap {
|
||||||
require.NoError(t, originalSC.SetPodResourceAllocation(podUID, alloc))
|
require.NoError(t, originalSC.SetPodResourceInfo(podUID, alloc))
|
||||||
require.NoFileExists(t, checkpointPath, "checkpoint should not be re-written")
|
require.NoFileExists(t, checkpointPath, "checkpoint should not be re-written")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Setting a new value should update the checkpoint.
|
// Setting a new value should update the checkpoint.
|
||||||
require.NoError(t, originalSC.SetPodResourceAllocation("foo-bar", map[string]v1.ResourceRequirements{
|
require.NoError(t, originalSC.SetPodResourceInfo("foo-bar", PodResourceInfo{
|
||||||
"container1": {Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}},
|
ContainerResources: map[string]v1.ResourceRequirements{
|
||||||
|
"container1": {Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}},
|
||||||
|
},
|
||||||
}))
|
}))
|
||||||
require.FileExists(t, checkpointPath, "checkpoint should be re-written")
|
require.FileExists(t, checkpointPath, "checkpoint should be re-written")
|
||||||
})
|
})
|
||||||
@ -153,13 +157,15 @@ func Test_stateCheckpoint_formatUpgraded(t *testing.T) {
|
|||||||
|
|
||||||
// prepare old checkpoint, ResizeStatusEntries is unset,
|
// prepare old checkpoint, ResizeStatusEntries is unset,
|
||||||
// pretend that the old checkpoint is unaware for the field ResizeStatusEntries
|
// pretend that the old checkpoint is unaware for the field ResizeStatusEntries
|
||||||
const checkpointContent = `{"data":"{\"allocationEntries\":{\"pod1\":{\"container1\":{\"requests\":{\"cpu\":\"1Ki\",\"memory\":\"1Ki\"}}}}}","checksum":1555601526}`
|
const checkpointContent = `{"data":"{\"entries\":{\"pod1\":{\"ContainerResources\":{\"container1\":{\"requests\":{\"cpu\":\"1Ki\",\"memory\":\"1Ki\"}}}}}}","checksum":1178570812}`
|
||||||
expectedPodResourceAllocation := PodResourceAllocation{
|
expectedPodResourceAllocation := PodResourceInfoMap{
|
||||||
"pod1": {
|
"pod1": {
|
||||||
"container1": {
|
ContainerResources: map[string]v1.ResourceRequirements{
|
||||||
Requests: v1.ResourceList{
|
"container1": {
|
||||||
v1.ResourceCPU: resource.MustParse("1Ki"),
|
Requests: v1.ResourceList{
|
||||||
v1.ResourceMemory: resource.MustParse("1Ki"),
|
v1.ResourceCPU: resource.MustParse("1Ki"),
|
||||||
|
v1.ResourceMemory: resource.MustParse("1Ki"),
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@ -178,7 +184,7 @@ func Test_stateCheckpoint_formatUpgraded(t *testing.T) {
|
|||||||
|
|
||||||
sc.cache = NewStateMemory(actualPodResourceAllocation)
|
sc.cache = NewStateMemory(actualPodResourceAllocation)
|
||||||
|
|
||||||
actualPodResourceAllocation = sc.cache.GetPodResourceAllocation()
|
actualPodResourceAllocation = sc.cache.GetPodResourceInfoMap()
|
||||||
|
|
||||||
require.Equal(t, expectedPodResourceAllocation, actualPodResourceAllocation, "pod resource allocation info is not equal")
|
require.Equal(t, expectedPodResourceAllocation, actualPodResourceAllocation, "pod resource allocation info is not equal")
|
||||||
}
|
}
|
||||||
|
@@ -27,63 +27,73 @@ import (

 type stateMemory struct {
     sync.RWMutex
-    podAllocation PodResourceAllocation
+    podResources PodResourceInfoMap
 }

 var _ State = &stateMemory{}

-// NewStateMemory creates new State to track resources allocated to pods
-func NewStateMemory(alloc PodResourceAllocation) State {
-    if alloc == nil {
-        alloc = PodResourceAllocation{}
+// NewStateMemory creates new State to track resources resourcesated to pods
+func NewStateMemory(resources PodResourceInfoMap) State {
+    if resources == nil {
+        resources = PodResourceInfoMap{}
     }
-    klog.V(2).InfoS("Initialized new in-memory state store for pod resource allocation tracking")
+    klog.V(2).InfoS("Initialized new in-memory state store for pod resource information tracking")
     return &stateMemory{
-        podAllocation: alloc,
+        podResources: resources,
     }
 }

-func (s *stateMemory) GetContainerResourceAllocation(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
+func (s *stateMemory) GetContainerResources(podUID types.UID, containerName string) (v1.ResourceRequirements, bool) {
     s.RLock()
     defer s.RUnlock()

-    alloc, ok := s.podAllocation[podUID][containerName]
-    return *alloc.DeepCopy(), ok
+    resourceInfo, ok := s.podResources[podUID]
+    if !ok {
+        return v1.ResourceRequirements{}, ok
+    }
+
+    resources, ok := resourceInfo.ContainerResources[containerName]
+    if !ok {
+        return v1.ResourceRequirements{}, ok
+    }
+    return *resources.DeepCopy(), ok
 }

-func (s *stateMemory) GetPodResourceAllocation() PodResourceAllocation {
+func (s *stateMemory) GetPodResourceInfoMap() PodResourceInfoMap {
     s.RLock()
     defer s.RUnlock()
-    return s.podAllocation.Clone()
+    return s.podResources.Clone()
 }

-func (s *stateMemory) SetContainerResourceAllocation(podUID types.UID, containerName string, alloc v1.ResourceRequirements) error {
+func (s *stateMemory) SetContainerResources(podUID types.UID, containerName string, resources v1.ResourceRequirements) error {
     s.Lock()
     defer s.Unlock()

-    if _, ok := s.podAllocation[podUID]; !ok {
-        s.podAllocation[podUID] = make(map[string]v1.ResourceRequirements)
+    if _, ok := s.podResources[podUID]; !ok {
+        s.podResources[podUID] = PodResourceInfo{
+            ContainerResources: make(map[string]v1.ResourceRequirements),
+        }
     }

-    s.podAllocation[podUID][containerName] = alloc
-    klog.V(3).InfoS("Updated container resource allocation", "podUID", podUID, "containerName", containerName, "alloc", alloc)
+    s.podResources[podUID].ContainerResources[containerName] = resources
+    klog.V(3).InfoS("Updated container resource information", "podUID", podUID, "containerName", containerName, "resources", resources)
     return nil
 }

-func (s *stateMemory) SetPodResourceAllocation(podUID types.UID, alloc map[string]v1.ResourceRequirements) error {
+func (s *stateMemory) SetPodResourceInfo(podUID types.UID, resourceInfo PodResourceInfo) error {
     s.Lock()
     defer s.Unlock()

-    s.podAllocation[podUID] = alloc
-    klog.V(3).InfoS("Updated pod resource allocation", "podUID", podUID, "allocation", alloc)
+    s.podResources[podUID] = resourceInfo
+    klog.V(3).InfoS("Updated pod resource information", "podUID", podUID, "information", resourceInfo)
     return nil
 }

 func (s *stateMemory) RemovePod(podUID types.UID) error {
     s.Lock()
     defer s.Unlock()
-    delete(s.podAllocation, podUID)
-    klog.V(3).InfoS("Deleted pod resource allocation", "podUID", podUID)
+    delete(s.podResources, podUID)
+    klog.V(3).InfoS("Deleted pod resource information", "podUID", podUID)
     return nil
 }

@@ -91,9 +101,9 @@ func (s *stateMemory) RemoveOrphanedPods(remainingPods sets.Set[types.UID]) {
     s.Lock()
     defer s.Unlock()

-    for podUID := range s.podAllocation {
+    for podUID := range s.podResources {
         if _, ok := remainingPods[types.UID(podUID)]; !ok {
-            delete(s.podAllocation, podUID)
+            delete(s.podResources, podUID)
         }
     }
 }
@@ -2458,17 +2458,19 @@ func TestPodResourceAllocationReset(t *testing.T) {
    emptyPodSpec.Containers[0].Resources.Requests = v1.ResourceList{}

    tests := []struct {
        name                          string
        pod                           *v1.Pod
        existingPodAllocation         *v1.Pod
-       expectedPodResourceAllocation state.PodResourceAllocation
+       expectedPodResourceInfoMap    state.PodResourceInfoMap
    }{
        {
            name: "Having both memory and cpu, resource allocation not exists",
            pod:  podWithUIDNameNsSpec("1", "pod1", "foo", *cpu500mMem500MPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "1": map[string]v1.ResourceRequirements{
-                   cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "1": {
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2476,9 +2478,11 @@ func TestPodResourceAllocationReset(t *testing.T) {
            name:                  "Having both memory and cpu, resource allocation exists",
            pod:                   podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
            existingPodAllocation: podWithUIDNameNsSpec("2", "pod2", "foo", *cpu500mMem500MPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "2": map[string]v1.ResourceRequirements{
-                   cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "2": {
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       cpu500mMem500MPodSpec.Containers[0].Name: cpu500mMem500MPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2486,18 +2490,22 @@ func TestPodResourceAllocationReset(t *testing.T) {
            name:                  "Having both memory and cpu, resource allocation exists (with different value)",
            pod:                   podWithUIDNameNsSpec("3", "pod3", "foo", *cpu500mMem500MPodSpec),
            existingPodAllocation: podWithUIDNameNsSpec("3", "pod3", "foo", *cpu800mMem800MPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "3": map[string]v1.ResourceRequirements{
-                   cpu800mMem800MPodSpec.Containers[0].Name: cpu800mMem800MPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "3": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       cpu800mMem800MPodSpec.Containers[0].Name: cpu800mMem800MPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
        {
            name: "Only has cpu, resource allocation not exists",
            pod:  podWithUIDNameNsSpec("4", "pod5", "foo", *cpu500mPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "4": map[string]v1.ResourceRequirements{
-                   cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "4": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2505,9 +2513,11 @@ func TestPodResourceAllocationReset(t *testing.T) {
            name:                  "Only has cpu, resource allocation exists",
            pod:                   podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
            existingPodAllocation: podWithUIDNameNsSpec("5", "pod5", "foo", *cpu500mPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "5": map[string]v1.ResourceRequirements{
-                   cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "5": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       cpu500mPodSpec.Containers[0].Name: cpu500mPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2515,18 +2525,22 @@ func TestPodResourceAllocationReset(t *testing.T) {
            name:                  "Only has cpu, resource allocation exists (with different value)",
            pod:                   podWithUIDNameNsSpec("6", "pod6", "foo", *cpu500mPodSpec),
            existingPodAllocation: podWithUIDNameNsSpec("6", "pod6", "foo", *cpu800mPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "6": map[string]v1.ResourceRequirements{
-                   cpu800mPodSpec.Containers[0].Name: cpu800mPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "6": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       cpu800mPodSpec.Containers[0].Name: cpu800mPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
        {
            name: "Only has memory, resource allocation not exists",
            pod:  podWithUIDNameNsSpec("7", "pod7", "foo", *mem500MPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "7": map[string]v1.ResourceRequirements{
-                   mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "7": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2534,9 +2548,11 @@ func TestPodResourceAllocationReset(t *testing.T) {
            name:                  "Only has memory, resource allocation exists",
            pod:                   podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
            existingPodAllocation: podWithUIDNameNsSpec("8", "pod8", "foo", *mem500MPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "8": map[string]v1.ResourceRequirements{
-                   mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "8": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       mem500MPodSpec.Containers[0].Name: mem500MPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2544,18 +2560,22 @@ func TestPodResourceAllocationReset(t *testing.T) {
            name:                  "Only has memory, resource allocation exists (with different value)",
            pod:                   podWithUIDNameNsSpec("9", "pod9", "foo", *mem500MPodSpec),
            existingPodAllocation: podWithUIDNameNsSpec("9", "pod9", "foo", *mem800MPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "9": map[string]v1.ResourceRequirements{
-                   mem800MPodSpec.Containers[0].Name: mem800MPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "9": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       mem800MPodSpec.Containers[0].Name: mem800MPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
        {
            name: "No CPU and memory, resource allocation not exists",
            pod:  podWithUIDNameNsSpec("10", "pod10", "foo", *emptyPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "10": map[string]v1.ResourceRequirements{
-                   emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "10": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2563,9 +2583,11 @@ func TestPodResourceAllocationReset(t *testing.T) {
            name:                  "No CPU and memory, resource allocation exists",
            pod:                   podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
            existingPodAllocation: podWithUIDNameNsSpec("11", "pod11", "foo", *emptyPodSpec),
-           expectedPodResourceAllocation: state.PodResourceAllocation{
-               "11": map[string]v1.ResourceRequirements{
-                   emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
+           expectedPodResourceInfoMap: state.PodResourceInfoMap{
+               "11": state.PodResourceInfo{
+                   ContainerResources: map[string]v1.ResourceRequirements{
+                       emptyPodSpec.Containers[0].Name: emptyPodSpec.Containers[0].Resources,
+                   },
                },
            },
        },
@@ -2585,7 +2607,7 @@ func TestPodResourceAllocationReset(t *testing.T) {
            if !found {
                t.Fatalf("resource allocation should exist: (pod: %#v, container: %s)", tc.pod, tc.pod.Spec.Containers[0].Name)
            }
-           assert.Equal(t, tc.expectedPodResourceAllocation[tc.pod.UID][tc.pod.Spec.Containers[0].Name], allocatedResources, tc.name)
+           assert.Equal(t, tc.expectedPodResourceInfoMap[tc.pod.UID].ContainerResources[tc.pod.Spec.Containers[0].Name], allocatedResources, tc.name)
        })
    }
 }