Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-05 18:24:07 +00:00
Use cached calculateResource result when removing pod from NodeInfo in preemption

parent 8770bd58d0 · commit 0452ae402a
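In brief, the change makes calculateResource a method on framework.PodInfo and memoizes its result in a new unexported cachedResource field, so removing a pod from a NodeInfo (notably while preemption probes candidate victims) reuses the value computed when the pod was added instead of recomputing it from the PodSpec; PodInfo.Update invalidates the cache when the pod object changes. Below is a stand-alone sketch of that caching pattern; the type shapes mirror the diff, but everything here is simplified illustration, not the framework code itself.

package main

import "fmt"

// Resource is a simplified stand-in for the scheduler's Resource struct.
type Resource struct {
	MilliCPU int64
	Memory   int64
}

// podResource mirrors the internal cache entry added by this commit.
type podResource struct {
	resource Resource
	non0CPU  int64
	non0Mem  int64
}

// PodInfo is a reduced stand-in for framework.PodInfo.
type PodInfo struct {
	milliCPU, memory int64 // stand-in for the pod spec
	cachedResource   *podResource
}

// calculateResource returns the cached result when present and otherwise
// recomputes and caches it, as in the commit.
func (pi *PodInfo) calculateResource() podResource {
	if pi.cachedResource != nil {
		return *pi.cachedResource
	}
	res := podResource{
		resource: Resource{MilliCPU: pi.milliCPU, Memory: pi.memory},
		non0CPU:  pi.milliCPU,
		non0Mem:  pi.memory,
	}
	pi.cachedResource = &res
	return res
}

// update invalidates the cache when the pod spec changes, mirroring
// PodInfo.Update setting cachedResource to nil.
func (pi *PodInfo) update(milliCPU, memory int64) {
	pi.milliCPU, pi.memory = milliCPU, memory
	pi.cachedResource = nil
}

func main() {
	pi := &PodInfo{milliCPU: 500, memory: 1 << 29}
	fmt.Println(pi.calculateResource().resource) // computes and caches
	fmt.Println(pi.calculateResource().resource) // served from the cache
	pi.update(700, 1<<30)                        // invalidates the cache
	fmt.Println(pi.calculateResource().resource) // recomputes
}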
pkg/scheduler/backend/cache/cache_test.go (vendored, 13 lines changed)
@@ -25,6 +25,7 @@ import (
 	"time"
 
 	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
@@ -51,7 +52,7 @@ func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *framework.NodeInfo) error {
 		expected.Generation = 0
 	}
 	if actual != nil {
-		if diff := cmp.Diff(expected, actual.info, cmp.AllowUnexported(framework.NodeInfo{})); diff != "" {
+		if diff := cmp.Diff(expected, actual.info, cmp.AllowUnexported(framework.NodeInfo{}), cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 			return fmt.Errorf("Unexpected node info (-want,+got):\n%s", diff)
 		}
 	}
@@ -465,7 +466,7 @@ func TestDump(t *testing.T) {
 	}
 	for name, ni := range snapshot.Nodes {
 		nItem := cache.nodes[name]
-		if diff := cmp.Diff(nItem.info, ni, cmp.AllowUnexported(framework.NodeInfo{})); diff != "" {
+		if diff := cmp.Diff(nItem.info, ni, cmp.AllowUnexported(framework.NodeInfo{}), cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 			t.Errorf("Unexpected node info (-want,+got):\n%s", diff)
 		}
 	}
@@ -1245,7 +1246,7 @@ func TestNodeOperators(t *testing.T) {
 
 	// Generations are globally unique. We check in our unit tests that they are incremented correctly.
 	expected.Generation = got.info.Generation
-	if diff := cmp.Diff(expected, got.info, cmp.AllowUnexported(framework.NodeInfo{})); diff != "" {
+	if diff := cmp.Diff(expected, got.info, cmp.AllowUnexported(framework.NodeInfo{}), cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 		t.Errorf("Failed to add node into scheduler cache (-want,+got):\n%s", diff)
 	}
 
@@ -1264,7 +1265,7 @@ func TestNodeOperators(t *testing.T) {
 		t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes.nodeInfoMap, tc.nodes)
 	}
 	expected.Generation = newNode.Generation
-	if diff := cmp.Diff(newNode, expected.Snapshot(), cmp.AllowUnexported(framework.NodeInfo{})); diff != "" {
+	if diff := cmp.Diff(newNode, expected.Snapshot(), cmp.AllowUnexported(framework.NodeInfo{}), cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 		t.Errorf("Failed to clone node:\n%s", diff)
 	}
 	// check imageState of NodeInfo with specific image when update snapshot
@@ -1286,7 +1287,7 @@ func TestNodeOperators(t *testing.T) {
 	}
 	expected.Generation = got.info.Generation
 
-	if diff := cmp.Diff(expected, got.info, cmp.AllowUnexported(framework.NodeInfo{})); diff != "" {
+	if diff := cmp.Diff(expected, got.info, cmp.AllowUnexported(framework.NodeInfo{}), cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 		t.Errorf("Unexpected schedulertypes after updating node (-want, +got):\n%s", diff)
 	}
 	// check imageState of NodeInfo with specific image when update node
@@ -1763,7 +1764,7 @@ func compareCacheWithNodeInfoSnapshot(t *testing.T, cache *cacheImpl, snapshot *Snapshot) error {
 		if want.Node() == nil {
 			want = nil
 		}
-		if diff := cmp.Diff(want, snapshot.nodeInfoMap[name], cmp.AllowUnexported(framework.NodeInfo{})); diff != "" {
+		if diff := cmp.Diff(want, snapshot.nodeInfoMap[name], cmp.AllowUnexported(framework.NodeInfo{}), cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 			return fmt.Errorf("Unexpected node info for node (-want, +got):\n%s", diff)
 		}
 	}
pkg/scheduler/backend/cache/snapshot_test.go (vendored, 3 lines changed)
@@ -21,6 +21,7 @@ import (
 	"testing"
 
 	"github.com/google/go-cmp/cmp"
+	"github.com/google/go-cmp/cmp/cmpopts"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -407,7 +408,7 @@ func TestNewSnapshot(t *testing.T) {
 				t.Error("node infos should not be nil")
 			}
 			for j := range test.expectedNodesInfos[i].Pods {
-				if diff := cmp.Diff(test.expectedNodesInfos[i].Pods[j], info.Pods[j]); diff != "" {
+				if diff := cmp.Diff(test.expectedNodesInfos[i].Pods[j], info.Pods[j], cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 					t.Errorf("Unexpected PodInfo (-want +got):\n%s", diff)
 				}
 			}
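A side effect of the new unexported field: go-cmp refuses to compare framework.PodInfo values unless it is told how to treat unexported fields, hence the cmpopts.IgnoreUnexported(framework.PodInfo{}) option threaded through these tests, while NodeInfo comparisons keep opting in with cmp.AllowUnexported. A minimal stand-alone contrast of the two options, using a hypothetical item type:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

// item is a hypothetical type standing in for framework.PodInfo:
// exported data plus an unexported cache field.
type item struct {
	Name   string
	cached *int
}

func main() {
	n := 42
	a := item{Name: "x", cached: &n} // cache populated
	b := item{Name: "x"}             // cache empty, same visible state

	// IgnoreUnexported skips the cache entirely: no diff is reported.
	fmt.Println(cmp.Diff(a, b, cmpopts.IgnoreUnexported(item{})) == "") // true

	// AllowUnexported compares the cache too: a diff is reported.
	fmt.Println(cmp.Diff(a, b, cmp.AllowUnexported(item{})) == "") // false
}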
@@ -2242,11 +2242,11 @@ func TestPriorityQueue_NominatedPodsForNode(t *testing.T) {
 	}
 	expectedList := []*framework.PodInfo{medPriorityPodInfo, unschedulablePodInfo}
 	podInfos := q.NominatedPodsForNode("node1")
-	if diff := cmp.Diff(expectedList, podInfos); diff != "" {
+	if diff := cmp.Diff(expectedList, podInfos, cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 		t.Errorf("Unexpected list of nominated Pods for node: (-want, +got):\n%s", diff)
 	}
 	podInfos[0].Pod.Name = "not mpp"
-	if diff := cmp.Diff(podInfos, q.NominatedPodsForNode("node1")); diff == "" {
+	if diff := cmp.Diff(podInfos, q.NominatedPodsForNode("node1"), cmpopts.IgnoreUnexported(framework.PodInfo{})); diff == "" {
 		t.Error("Expected list of nominated Pods for node2 is different from podInfos")
 	}
 	if len(q.NominatedPodsForNode("node2")) != 0 {
@@ -2548,7 +2548,7 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			for _, p := range test.podsToAdd {
 				upm.addOrUpdate(newQueuedPodInfoForLookup(p))
 			}
-			if diff := cmp.Diff(test.expectedMapAfterAdd, upm.podInfoMap); diff != "" {
+			if diff := cmp.Diff(test.expectedMapAfterAdd, upm.podInfoMap, cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 				t.Errorf("Unexpected map after adding pods(-want, +got):\n%s", diff)
 			}
 
@@ -2556,14 +2556,14 @@ func TestUnschedulablePodsMap(t *testing.T) {
 				for _, p := range test.podsToUpdate {
 					upm.addOrUpdate(newQueuedPodInfoForLookup(p))
 				}
-				if diff := cmp.Diff(test.expectedMapAfterUpdate, upm.podInfoMap); diff != "" {
+				if diff := cmp.Diff(test.expectedMapAfterUpdate, upm.podInfoMap, cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 					t.Errorf("Unexpected map after updating pods (-want, +got):\n%s", diff)
 				}
 			}
 			for _, p := range test.podsToDelete {
 				upm.delete(p, false)
 			}
-			if diff := cmp.Diff(test.expectedMapAfterDelete, upm.podInfoMap); diff != "" {
+			if diff := cmp.Diff(test.expectedMapAfterDelete, upm.podInfoMap, cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 				t.Errorf("Unexpected map after deleting pods (-want, +got):\n%s", diff)
 			}
 			upm.clear()
@@ -2917,7 +2917,7 @@ func TestPriorityQueue_initPodMaxInUnschedulablePodsDuration(t *testing.T) {
 				}
 			}
 
-			if diff := cmp.Diff(test.expected, podInfoList); diff != "" {
+			if diff := cmp.Diff(test.expected, podInfoList, cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 				t.Errorf("Unexpected QueuedPodInfo list (-want, +got):\n%s", diff)
 			}
 		})
@@ -3094,7 +3094,7 @@ func TestPodTimestamp(t *testing.T) {
 				}
 			}
 
-			if diff := cmp.Diff(test.expected, podInfoList); diff != "" {
+			if diff := cmp.Diff(test.expected, podInfoList, cmpopts.IgnoreUnexported(framework.PodInfo{})); diff != "" {
 				t.Errorf("Unexpected QueuedPodInfo list (-want, +got):\n%s", diff)
 			}
 		})
@@ -397,6 +397,13 @@ func (pqi *QueuedPodInfo) DeepCopy() *QueuedPodInfo {
 	}
 }
 
+// podResource contains the result of calculateResource and is used only internally.
+type podResource struct {
+	resource Resource
+	non0CPU  int64
+	non0Mem  int64
+}
+
 // PodInfo is a wrapper to a Pod with additional pre-computed information to
 // accelerate processing. This information is typically immutable (e.g., pre-processed
 // inter-pod affinity selectors).
@@ -406,6 +413,15 @@ type PodInfo struct {
 	RequiredAntiAffinityTerms  []AffinityTerm
 	PreferredAffinityTerms     []WeightedAffinityTerm
 	PreferredAntiAffinityTerms []WeightedAffinityTerm
+	// cachedResource contains precomputed resources for the Pod (podResource).
+	// The value can change only if InPlacePodVerticalScaling is enabled.
+	// In that case, the whole PodInfo object is recreated (for assigned pods in cache).
+	// cachedResource contains a podResource, computed when adding a scheduled pod to NodeInfo.
+	// When removing a pod from a NodeInfo, i.e. finding victims for preemption or removing a pod from a cluster,
+	// cachedResource is used instead, which provides a noticeable performance boost.
+	// Note: the cachedResource field shouldn't be accessed directly.
+	// Use the calculateResource method to obtain it instead.
+	cachedResource *podResource
 }
 
 // DeepCopy returns a deep copy of the PodInfo object.
@@ -416,6 +432,7 @@ func (pi *PodInfo) DeepCopy() *PodInfo {
 		RequiredAntiAffinityTerms:  pi.RequiredAntiAffinityTerms,
 		PreferredAffinityTerms:     pi.PreferredAffinityTerms,
 		PreferredAntiAffinityTerms: pi.PreferredAntiAffinityTerms,
+		cachedResource:             pi.cachedResource,
 	}
 }
 
@@ -464,6 +481,7 @@ func (pi *PodInfo) Update(pod *v1.Pod) error {
 	pi.RequiredAntiAffinityTerms = requiredAntiAffinityTerms
 	pi.PreferredAffinityTerms = weightedAffinityTerms
 	pi.PreferredAntiAffinityTerms = weightedAntiAffinityTerms
+	pi.cachedResource = nil
 	return utilerrors.NewAggregate(parseErrs)
 }
 
@@ -963,7 +981,7 @@ func (n *NodeInfo) AddPodInfo(podInfo *PodInfo) {
 	if podWithRequiredAntiAffinity(podInfo.Pod) {
 		n.PodsWithRequiredAntiAffinity = append(n.PodsWithRequiredAntiAffinity, podInfo)
 	}
-	n.update(podInfo.Pod, 1)
+	n.update(podInfo, 1)
 }
 
 // AddPod is a wrapper around AddPodInfo.
@@ -985,8 +1003,8 @@ func podWithRequiredAntiAffinity(p *v1.Pod) bool {
 		len(affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0
 }
 
-func removeFromSlice(logger klog.Logger, s []*PodInfo, k string) ([]*PodInfo, bool) {
-	var removed bool
+func removeFromSlice(logger klog.Logger, s []*PodInfo, k string) ([]*PodInfo, *PodInfo) {
+	var removedPod *PodInfo
 	for i := range s {
 		tmpKey, err := GetPodKey(s[i].Pod)
 		if err != nil {
@@ -994,18 +1012,18 @@ func removeFromSlice(logger klog.Logger, s []*PodInfo, k string) ([]*PodInfo, bool) {
 			continue
 		}
 		if k == tmpKey {
+			removedPod = s[i]
 			// delete the element
 			s[i] = s[len(s)-1]
 			s = s[:len(s)-1]
-			removed = true
 			break
 		}
 	}
 	// resets the slices to nil so that we can do DeepEqual in unit tests.
 	if len(s) == 0 {
-		return nil, removed
+		return nil, removedPod
 	}
-	return s, removed
+	return s, removedPod
 }
 
 // RemovePod subtracts pod information from this NodeInfo.
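Returning the removed *PodInfo instead of a bool is what lets RemovePod (next hunk) hand the element, together with its cached podResource, straight to NodeInfo.update. A stand-alone sketch of this swap-delete shape, as a hypothetical generic helper rather than the framework function:

package main

import "fmt"

// removeByKey sketches the swap-delete used by removeFromSlice: it returns
// the shrunken slice plus the removed element (nil when nothing matched),
// so the caller can keep using the element's precomputed state.
func removeByKey[T any](s []*T, match func(*T) bool) ([]*T, *T) {
	for i := range s {
		if match(s[i]) {
			removed := s[i]
			// swap-delete: move the last element into the hole
			s[i] = s[len(s)-1]
			return s[:len(s)-1], removed
		}
	}
	return s, nil
}

func main() {
	a, b, c := "a", "b", "c"
	s := []*string{&a, &b, &c}
	s, removed := removeByKey(s, func(p *string) bool { return *p == "b" })
	fmt.Println(len(s), *removed) // 2 b
}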
@@ -1021,33 +1039,33 @@ func (n *NodeInfo) RemovePod(logger klog.Logger, pod *v1.Pod) error {
 		n.PodsWithRequiredAntiAffinity, _ = removeFromSlice(logger, n.PodsWithRequiredAntiAffinity, k)
 	}
 
-	var removed bool
-	if n.Pods, removed = removeFromSlice(logger, n.Pods, k); removed {
-		n.update(pod, -1)
+	var removedPod *PodInfo
+	if n.Pods, removedPod = removeFromSlice(logger, n.Pods, k); removedPod != nil {
+		n.update(removedPod, -1)
 		return nil
 	}
 	return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
 }
 
-// update node info based on the pod and sign.
+// update node info based on the pod, and sign.
 // The sign will be set to `+1` when AddPod and to `-1` when RemovePod.
-func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
-	res, non0CPU, non0Mem := calculateResource(pod)
-	n.Requested.MilliCPU += sign * res.MilliCPU
-	n.Requested.Memory += sign * res.Memory
-	n.Requested.EphemeralStorage += sign * res.EphemeralStorage
-	if n.Requested.ScalarResources == nil && len(res.ScalarResources) > 0 {
+func (n *NodeInfo) update(podInfo *PodInfo, sign int64) {
+	podResource := podInfo.calculateResource()
+	n.Requested.MilliCPU += sign * podResource.resource.MilliCPU
+	n.Requested.Memory += sign * podResource.resource.Memory
+	n.Requested.EphemeralStorage += sign * podResource.resource.EphemeralStorage
+	if n.Requested.ScalarResources == nil && len(podResource.resource.ScalarResources) > 0 {
 		n.Requested.ScalarResources = map[v1.ResourceName]int64{}
 	}
-	for rName, rQuant := range res.ScalarResources {
+	for rName, rQuant := range podResource.resource.ScalarResources {
 		n.Requested.ScalarResources[rName] += sign * rQuant
 	}
-	n.NonZeroRequested.MilliCPU += sign * non0CPU
-	n.NonZeroRequested.Memory += sign * non0Mem
+	n.NonZeroRequested.MilliCPU += sign * podResource.non0CPU
+	n.NonZeroRequested.Memory += sign * podResource.non0Mem
 
 	// Consume ports when pod added or release ports when pod removed.
-	n.updateUsedPorts(pod, sign > 0)
-	n.updatePVCRefCounts(pod, sign > 0)
+	n.updateUsedPorts(podInfo.Pod, sign > 0)
+	n.updatePVCRefCounts(podInfo.Pod, sign > 0)
 
 	n.Generation = nextGeneration()
 }
@@ -1103,20 +1121,25 @@ func getNonMissingContainerRequests(requests v1.ResourceList, podLevelResourcesSet bool) v1.ResourceList {
 
 }
 
-func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
-	requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
-		UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+func (pi *PodInfo) calculateResource() podResource {
+	if pi.cachedResource != nil {
+		return *pi.cachedResource
+	}
+	inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling)
+	podLevelResourcesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources)
+	requests := resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
+		UseStatusResources: inPlacePodVerticalScalingEnabled,
 		// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
-		SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
+		SkipPodLevelResources: !podLevelResourcesEnabled,
 	})
-	isPodLevelResourcesSet := utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources) && resourcehelper.IsPodLevelRequestsSet(pod)
+	isPodLevelResourcesSet := podLevelResourcesEnabled && resourcehelper.IsPodLevelRequestsSet(pi.Pod)
 	nonMissingContainerRequests := getNonMissingContainerRequests(requests, isPodLevelResourcesSet)
 	non0Requests := requests
 	if len(nonMissingContainerRequests) > 0 {
-		non0Requests = resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
-			UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		non0Requests = resourcehelper.PodRequests(pi.Pod, resourcehelper.PodResourcesOptions{
+			UseStatusResources: inPlacePodVerticalScalingEnabled,
 			// SkipPodLevelResources is set to false when PodLevelResources feature is enabled.
-			SkipPodLevelResources: !utilfeature.DefaultFeatureGate.Enabled(features.PodLevelResources),
+			SkipPodLevelResources:       !podLevelResourcesEnabled,
 			NonMissingContainerRequests: nonMissingContainerRequests,
 		})
 	}
@@ -1125,7 +1148,13 @@ func (pi *PodInfo) calculateResource() podResource {
 
 	var res Resource
 	res.Add(requests)
-	return res, non0CPU.MilliValue(), non0Mem.Value()
+	podResource := podResource{
+		resource: res,
+		non0CPU:  non0CPU.MilliValue(),
+		non0Mem:  non0Mem.Value(),
+	}
+	pi.cachedResource = &podResource
+	return podResource
 }
 
 // updateUsedPorts updates the UsedPorts of NodeInfo.
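The payoff lands on the removal path: preemption repeatedly simulates removing candidate victims from a NodeInfo, and each RemovePod now reads the cached podResource rather than recalculating requests. A sketch of that call shape, using the RemovePod signature from this diff inside a hypothetical helper:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// simulateVictimRemoval is a hypothetical illustration (the name and loop are
// not part of this commit) of the caller that benefits: candidate victims are
// removed from a NodeInfo one by one, and each RemovePod reuses the
// podResource cached when the pod was added instead of recomputing it
// from the PodSpec.
func simulateVictimRemoval(logger klog.Logger, nodeInfo *framework.NodeInfo, victims []*v1.Pod) error {
	for _, pod := range victims {
		if err := nodeInfo.RemovePod(logger, pod); err != nil {
			return err
		}
	}
	return nil
}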
@@ -296,6 +296,14 @@ func TestNewNodeInfo(t *testing.T) {
 				NodeName: nodeName,
 			},
 		},
+		cachedResource: &podResource{
+			resource: Resource{
+				MilliCPU: 100,
+				Memory:   500,
+			},
+			non0CPU: 100,
+			non0Mem: 500,
+		},
 	},
 	{
 		Pod: &v1.Pod{
@@ -325,6 +333,14 @@ func TestNewNodeInfo(t *testing.T) {
 				NodeName: nodeName,
 			},
 		},
+		cachedResource: &podResource{
+			resource: Resource{
+				MilliCPU: 200,
+				Memory:   1024,
+			},
+			non0CPU: 200,
+			non0Mem: 1024,
+		},
 	},
 },
 }
@@ -389,6 +405,14 @@ func TestNodeInfoClone(t *testing.T) {
 				NodeName: nodeName,
 			},
 		},
+		cachedResource: &podResource{
+			resource: Resource{
+				MilliCPU: 100,
+				Memory:   500,
+			},
+			non0CPU: 100,
+			non0Mem: 500,
+		},
 	},
 	{
 		Pod: &v1.Pod{
@@ -418,6 +442,14 @@ func TestNodeInfoClone(t *testing.T) {
 				NodeName: nodeName,
 			},
 		},
+		cachedResource: &podResource{
+			resource: Resource{
+				MilliCPU: 200,
+				Memory:   1024,
+			},
+			non0CPU: 200,
+			non0Mem: 1024,
+		},
 	},
 },
 },
@@ -463,6 +495,14 @@ func TestNodeInfoClone(t *testing.T) {
 				NodeName: nodeName,
 			},
 		},
+		cachedResource: &podResource{
+			resource: Resource{
+				MilliCPU: 100,
+				Memory:   500,
+			},
+			non0CPU: 100,
+			non0Mem: 500,
+		},
 	},
 	{
 		Pod: &v1.Pod{
@@ -492,6 +532,14 @@ func TestNodeInfoClone(t *testing.T) {
 				NodeName: nodeName,
 			},
 		},
+		cachedResource: &podResource{
+			resource: Resource{
+				MilliCPU: 200,
+				Memory:   1024,
+			},
+			non0CPU: 200,
+			non0Mem: 1024,
+		},
 	},
 },
 },
@@ -713,6 +761,14 @@ func TestNodeInfoAddPod(t *testing.T) {
 			},
 		},
 	},
+	cachedResource: &podResource{
+		resource: Resource{
+			MilliCPU: 600,
+			Memory:   500,
+		},
+		non0CPU: 600,
+		non0Mem: 500,
+	},
 },
 {
 	Pod: &v1.Pod{
@@ -754,6 +810,14 @@ func TestNodeInfoAddPod(t *testing.T) {
 			},
 		},
 	},
+	cachedResource: &podResource{
+		resource: Resource{
+			MilliCPU: 700,
+			Memory:   500,
+		},
+		non0CPU: 700,
+		non0Mem: schedutil.DefaultMemoryRequest + 500,
+	},
 },
 {
 	Pod: &v1.Pod{
@@ -805,6 +869,14 @@ func TestNodeInfoAddPod(t *testing.T) {
 			},
 		},
 	},
+	cachedResource: &podResource{
+		resource: Resource{
+			MilliCPU: 1000,
+			Memory:   schedutil.DefaultMemoryRequest + 500,
+		},
+		non0CPU: 1000,
+		non0Mem: schedutil.DefaultMemoryRequest + 500,
+	},
 },
 },
 }
@@ -820,8 +892,8 @@ func TestNodeInfoAddPod(t *testing.T) {
 	}
 
 	expected.Generation = ni.Generation
-	if !reflect.DeepEqual(expected, ni) {
-		t.Errorf("expected: %#v, got: %#v", expected, ni)
+	if diff := cmp.Diff(expected, ni, cmp.AllowUnexported(NodeInfo{}, PodInfo{}, podResource{})); diff != "" {
+		t.Errorf("unexpected diff (-want, +got):\n%s", diff)
 	}
 }
 
@@ -940,6 +1012,14 @@ func TestNodeInfoRemovePod(t *testing.T) {
 			},
 		},
 	},
+	cachedResource: &podResource{
+		resource: Resource{
+			MilliCPU: 600,
+			Memory:   1000,
+		},
+		non0CPU: 600,
+		non0Mem: 1000,
+	},
 },
 {
 	Pod: &v1.Pod{
@@ -973,6 +1053,14 @@ func TestNodeInfoRemovePod(t *testing.T) {
 			},
 		},
 	},
+	cachedResource: &podResource{
+		resource: Resource{
+			MilliCPU: 700,
+			Memory:   1524,
+		},
+		non0CPU: 700,
+		non0Mem: 1524,
+	},
 },
 },
 },
@@ -1081,6 +1169,14 @@ func TestNodeInfoRemovePod(t *testing.T) {
 			},
 		},
 	},
+	cachedResource: &podResource{
+		resource: Resource{
+			MilliCPU: 700,
+			Memory:   1524,
+		},
+		non0CPU: 700,
+		non0Mem: 1524,
+	},
 },
 },
 },
@@ -1524,23 +1620,23 @@ var (
 	restartAlways = v1.ContainerRestartPolicyAlways
 )
 
-func TestCalculateResources(t *testing.T) {
+func TestPodInfoCalculateResources(t *testing.T) {
 	testCases := []struct {
 		name                     string
 		containers               []v1.Container
 		podResources             *v1.ResourceRequirements
 		podLevelResourcesEnabled bool
-		expectedResource         Resource
-		expectedNon0CPU          int64
-		expectedNon0Mem          int64
+		expectedResource         podResource
 		initContainers           []v1.Container
 	}{
 		{
 			name:       "requestless container",
 			containers: []v1.Container{{}},
-			expectedResource: Resource{},
-			expectedNon0CPU:  schedutil.DefaultMilliCPURequest,
-			expectedNon0Mem:  schedutil.DefaultMemoryRequest,
+			expectedResource: podResource{
+				resource: Resource{},
+				non0CPU:  schedutil.DefaultMilliCPURequest,
+				non0Mem:  schedutil.DefaultMemoryRequest,
+			},
 		},
 		{
 			name: "1X container with requests",
@@ -1554,12 +1650,14 @@ func TestPodInfoCalculateResources(t *testing.T) {
 					},
 				},
 			},
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				MilliCPU: cpu500m.MilliValue(),
 				Memory:   mem500M.Value(),
 			},
-			expectedNon0CPU: cpu500m.MilliValue(),
-			expectedNon0Mem: mem500M.Value(),
+			non0CPU: cpu500m.MilliValue(),
+			non0Mem: mem500M.Value(),
+			},
 		},
 		{
 			name: "2X container with requests",
@@ -1581,12 +1679,14 @@ func TestPodInfoCalculateResources(t *testing.T) {
 					},
 				},
 			},
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
 				Memory:   mem500M.Value() + mem800M.Value(),
 			},
-			expectedNon0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
-			expectedNon0Mem: mem500M.Value() + mem800M.Value(),
+			non0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
+			non0Mem: mem500M.Value() + mem800M.Value(),
+			},
 		},
 		{
 			name: "1X container and 1X init container with pod-level requests",
@@ -1617,12 +1717,14 @@ func TestPodInfoCalculateResources(t *testing.T) {
 					v1.ResourceMemory: mem1200M,
 				},
 			},
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				MilliCPU: cpu1200m.MilliValue(),
 				Memory:   mem1200M.Value(),
 			},
-			expectedNon0CPU: cpu1200m.MilliValue(),
-			expectedNon0Mem: mem1200M.Value(),
+			non0CPU: cpu1200m.MilliValue(),
+			non0Mem: mem1200M.Value(),
+			},
 		},
 		{
 			name: "1X container and 1X sidecar container with pod-level requests",
@@ -1654,12 +1756,14 @@ func TestPodInfoCalculateResources(t *testing.T) {
 					v1.ResourceMemory: mem1200M,
 				},
 			},
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				MilliCPU: cpu1200m.MilliValue(),
 				Memory:   mem1200M.Value(),
 			},
-			expectedNon0CPU: cpu1200m.MilliValue(),
-			expectedNon0Mem: mem1200M.Value(),
+			non0CPU: cpu1200m.MilliValue(),
+			non0Mem: mem1200M.Value(),
+			},
 		},
 		{
 			name: "1X container with pod-level memory requests",
@@ -1679,11 +1783,13 @@ func TestPodInfoCalculateResources(t *testing.T) {
 					v1.ResourceMemory: mem1200M,
 				},
 			},
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				Memory: mem1200M.Value(),
 			},
-			expectedNon0CPU: schedutil.DefaultMilliCPURequest,
-			expectedNon0Mem: mem1200M.Value(),
+			non0CPU: schedutil.DefaultMilliCPURequest,
+			non0Mem: mem1200M.Value(),
+			},
 		},
 		{
 			name: "1X container with pod-level cpu requests",
@@ -1703,11 +1809,13 @@ func TestPodInfoCalculateResources(t *testing.T) {
 					v1.ResourceCPU: cpu500m,
 				},
 			},
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				MilliCPU: cpu500m.MilliValue(),
 			},
-			expectedNon0CPU: cpu500m.MilliValue(),
-			expectedNon0Mem: schedutil.DefaultMemoryRequest,
+			non0CPU: cpu500m.MilliValue(),
+			non0Mem: schedutil.DefaultMemoryRequest,
+			},
 		},
 		{
 			name: "1X container unsupported resources and pod-level supported resources",
@@ -1735,36 +1843,32 @@ func TestPodInfoCalculateResources(t *testing.T) {
 					v1.ResourceCPU: cpu500m,
 				},
 			},
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				MilliCPU:         cpu500m.MilliValue(),
 				EphemeralStorage: mem800M.Value(),
 			},
-			expectedNon0CPU: cpu500m.MilliValue(),
-			expectedNon0Mem: schedutil.DefaultMemoryRequest,
+			non0CPU: cpu500m.MilliValue(),
+			non0Mem: schedutil.DefaultMemoryRequest,
+			},
 		},
 	}
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, tc.podLevelResourcesEnabled)
-			pod := &v1.Pod{
+			podInfo := PodInfo{
+				Pod: &v1.Pod{
 				Spec: v1.PodSpec{
 					Resources:      tc.podResources,
 					Containers:     tc.containers,
 					InitContainers: tc.initContainers,
 				},
+				},
 			}
-			res, non0CPU, non0Mem := calculateResource(pod)
-			if !reflect.DeepEqual(res, tc.expectedResource) {
-				t.Errorf("Test: %s expected resource: %+v, got: %+v", tc.name, tc.expectedResource, res)
-			}
-
-			if non0CPU != tc.expectedNon0CPU {
-				t.Errorf("Test: %s expected non0CPU: %d, got: %d", tc.name, tc.expectedNon0CPU, non0CPU)
-			}
-
-			if non0Mem != tc.expectedNon0Mem {
-				t.Errorf("Test: %s expected non0Mem: %d, got: %d", tc.name, tc.expectedNon0Mem, non0Mem)
+			res := podInfo.calculateResource()
+			if !reflect.DeepEqual(tc.expectedResource, res) {
+				t.Errorf("Test: %s expected resource: %+v, got: %+v", tc.name, tc.expectedResource, res.resource)
 			}
 		})
 	}
@@ -1785,11 +1889,11 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 
 	restartAlways := v1.ContainerRestartPolicyAlways
 
-	preparePod := func(pod v1.Pod,
+	preparePodInfo := func(pod v1.Pod,
 		requests, statusResources,
 		initRequests, initStatusResources,
 		sidecarRequests, sidecarStatusResources *v1.ResourceList,
-		resizeStatus v1.PodResizeStatus) v1.Pod {
+		resizeStatus v1.PodResizeStatus) PodInfo {
 
 		if requests != nil {
 			pod.Spec.Containers = append(pod.Spec.Containers,
@@ -1846,7 +1950,7 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 		}
 
 		pod.Status.Resize = resizeStatus
-		return pod
+		return PodInfo{Pod: &pod}
 	}
 
 	tests := []struct {
@@ -1858,45 +1962,63 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 		sidecarRequests        *v1.ResourceList
 		sidecarStatusResources *v1.ResourceList
 		resizeStatus           v1.PodResizeStatus
-		expectedResource       Resource
-		expectedNon0CPU        int64
-		expectedNon0Mem        int64
+		expectedResource       podResource
 	}{
 		{
 			name:            "Pod with no pending resize",
 			requests:        v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			resizeStatus:    "",
-			expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
-			expectedNon0CPU:  cpu500m.MilliValue(),
-			expectedNon0Mem:  mem500M.Value(),
+			expectedResource: podResource{
+				resource: Resource{
+					MilliCPU: cpu500m.MilliValue(),
+					Memory:   mem500M.Value(),
+				},
+				non0CPU: cpu500m.MilliValue(),
+				non0Mem: mem500M.Value(),
+			},
 		},
 		{
 			name:            "Pod with resize in progress",
 			requests:        v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			resizeStatus:    v1.PodResizeStatusInProgress,
-			expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
-			expectedNon0CPU:  cpu500m.MilliValue(),
-			expectedNon0Mem:  mem500M.Value(),
+			expectedResource: podResource{
+				resource: Resource{
+					MilliCPU: cpu500m.MilliValue(),
+					Memory:   mem500M.Value(),
+				},
+				non0CPU: cpu500m.MilliValue(),
+				non0Mem: mem500M.Value(),
+			},
 		},
 		{
 			name:            "Pod with deferred resize",
 			requests:        v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
 			statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			resizeStatus:    v1.PodResizeStatusDeferred,
-			expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
-			expectedNon0CPU:  cpu700m.MilliValue(),
-			expectedNon0Mem:  mem800M.Value(),
+			expectedResource: podResource{
+				resource: Resource{
+					MilliCPU: cpu700m.MilliValue(),
+					Memory:   mem800M.Value(),
+				},
+				non0CPU: cpu700m.MilliValue(),
+				non0Mem: mem800M.Value(),
+			},
 		},
 		{
 			name:            "Pod with infeasible resize",
 			requests:        v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
 			statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
 			resizeStatus:    v1.PodResizeStatusInfeasible,
-			expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
-			expectedNon0CPU:  cpu500m.MilliValue(),
-			expectedNon0Mem:  mem500M.Value(),
+			expectedResource: podResource{
+				resource: Resource{
+					MilliCPU: cpu500m.MilliValue(),
+					Memory:   mem500M.Value(),
+				},
+				non0CPU: cpu500m.MilliValue(),
+				non0Mem: mem500M.Value(),
+			},
 		},
 		{
 			name: "Pod with init container and no pending resize",
@@ -1905,9 +2027,14 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			initRequests:        &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
 			initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
 			resizeStatus:        "",
-			expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
-			expectedNon0CPU:  cpu700m.MilliValue(),
-			expectedNon0Mem:  mem800M.Value(),
+			expectedResource: podResource{
+				resource: Resource{
+					MilliCPU: cpu700m.MilliValue(),
+					Memory:   mem800M.Value(),
+				},
+				non0CPU: cpu700m.MilliValue(),
+				non0Mem: mem800M.Value(),
+			},
 		},
 		{
 			name: "Pod with sider container and no pending resize",
@@ -1918,32 +2045,28 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			sidecarRequests:        &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
 			sidecarStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
 			resizeStatus:           "",
-			expectedResource: Resource{
+			expectedResource: podResource{
+				resource: Resource{
 				MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
 				Memory:   mem500M.Value() + mem800M.Value(),
 			},
-			expectedNon0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
-			expectedNon0Mem: mem500M.Value() + mem800M.Value(),
+			non0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
+			non0Mem: mem500M.Value() + mem800M.Value(),
+			},
 		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			pod := preparePod(*testpod.DeepCopy(),
+			podInfo := preparePodInfo(*testpod.DeepCopy(),
 				&tt.requests, &tt.statusResources,
 				tt.initRequests, tt.initStatusResources,
 				tt.sidecarRequests, tt.sidecarStatusResources,
 				tt.resizeStatus)
 
-			res, non0CPU, non0Mem := calculateResource(&pod)
+			res := podInfo.calculateResource()
 			if !reflect.DeepEqual(tt.expectedResource, res) {
-				t.Errorf("Test: %s expected resource: %+v, got: %+v", tt.name, tt.expectedResource, res)
-			}
-			if non0CPU != tt.expectedNon0CPU {
-				t.Errorf("Test: %s expected non0CPU: %d, got: %d", tt.name, tt.expectedNon0CPU, non0CPU)
-			}
-			if non0Mem != tt.expectedNon0Mem {
-				t.Errorf("Test: %s expected non0Mem: %d, got: %d", tt.name, tt.expectedNon0Mem, non0Mem)
+				t.Errorf("Test: %s expected resource: %+v, got: %+v", tt.name, tt.expectedResource, res.resource)
 			}
 		})
 	}