implement sidecar resource calculation

Todd Neal 2023-03-10 11:57:25 -06:00 committed by Gunju Kim
parent 03c2217687
commit ea1eb7f8f7
5 changed files with 619 additions and 103 deletions

View File

@@ -65,7 +65,7 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
cs, found := containerStatuses[container.Name]
if found {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
- containerReqs = cs.AllocatedResources
containerReqs = cs.AllocatedResources.DeepCopy()
} else {
containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
}
@@ -83,20 +83,44 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
addResourceList(reqs, containerReqs)
}
restartableInitContainerReqs := v1.ResourceList{}
initContainerReqs := v1.ResourceList{}
// init containers define the minimum of any resource
// Note: In-place resize is not allowed for InitContainers, so no need to check for ResizeStatus value
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
for _, container := range pod.Spec.InitContainers {
containerReqs := container.Resources.Requests
if len(opts.NonMissingContainerRequests) > 0 {
containerReqs = applyNonMissing(containerReqs, opts.NonMissingContainerRequests)
}
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
// restartable init containers keep running alongside the regular containers, so add them to the cumulative container requests
addResourceList(reqs, containerReqs)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerReqs, containerReqs)
containerReqs = restartableInitContainerReqs
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerReqs)
addResourceList(tmp, restartableInitContainerReqs)
containerReqs = tmp
}
if opts.ContainerFn != nil {
opts.ContainerFn(containerReqs, podutil.InitContainers)
}
- maxResourceList(reqs, containerReqs)
maxResourceList(initContainerReqs, containerReqs)
}
maxResourceList(reqs, initContainerReqs)
// Add overhead for running a pod to the sum of requests if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
addResourceList(reqs, pod.Spec.Overhead)
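
The `InitContainerUse(i)` comment above is the whole algorithm in one line. For intuition, a minimal standalone sketch of the same accumulation follows; Ctr and effectiveCPURequest are hypothetical stand-ins using plain milli-core integers rather than the real v1 types, an illustration of the rule rather than the kubelet implementation:

package main

import "fmt"

// Ctr is a hypothetical stand-in for a container spec; CPU is a request in
// milli-cores rather than a real resource.Quantity.
type Ctr struct {
	Name        string
	CPU         int64
	Restartable bool // init container with restartPolicy: Always
}

// effectiveCPURequest mirrors the accumulation above: the i-th init
// container's use is its own request plus every restartable init container
// started before it, and the pod request is the larger of the peak init use
// and the sidecars-plus-regular-containers total.
func effectiveCPURequest(initCtrs, regularCtrs []Ctr) int64 {
	var sidecars, peakInit int64
	for _, c := range initCtrs {
		use := sidecars + c.CPU
		if c.Restartable {
			sidecars += c.CPU // keeps running after initialization
		}
		if use > peakInit {
			peakInit = use
		}
	}
	var regular int64
	for _, c := range regularCtrs {
		regular += c.CPU
	}
	if total := sidecars + regular; total > peakInit {
		return total
	}
	return peakInit
}

func main() {
	// The numbers match the "multiple restartable and regular init containers"
	// test case below: init-2 runs while restartable-init-1 and -2 are already
	// up, so the peak is 5+1+2 = 8 CPU.
	initCtrs := []Ctr{
		{"init-1", 5000, false},
		{"restartable-init-1", 1000, true},
		{"restartable-init-2", 2000, true},
		{"init-2", 5000, false},
		{"restartable-init-3", 3000, true},
	}
	regularCtrs := []Ctr{{"container-1", 1000, false}}
	fmt.Println(effectiveCPURequest(initCtrs, regularCtrs)) // 8000 milli-cores
}
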
@@ -135,14 +159,40 @@ func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
}
addResourceList(limits, container.Resources.Limits)
}
restartableInitContainerLimits := v1.ResourceList{}
initContainerLimits := v1.ResourceList{}
// init containers define the minimum of any resource
//
// Let's say `InitContainerUse(i)` is the resource requirements when the i-th
// init container is initializing, then
// `InitContainerUse(i) = sum(Resources of restartable init containers with index < i) + Resources of i-th init container`.
//
// See https://github.com/kubernetes/enhancements/tree/master/keps/sig-node/753-sidecar-containers#exposing-pod-resource-requirements for the detail.
for _, container := range pod.Spec.InitContainers {
- if opts.ContainerFn != nil {
- opts.ContainerFn(container.Resources.Limits, podutil.InitContainers)
- }
containerLimits := container.Resources.Limits
// Is the init container marked as a restartable init container?
if container.RestartPolicy != nil && *container.RestartPolicy == v1.ContainerRestartPolicyAlways {
addResourceList(limits, containerLimits)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerLimits, containerLimits)
containerLimits = restartableInitContainerLimits
} else {
tmp := v1.ResourceList{}
addResourceList(tmp, containerLimits)
addResourceList(tmp, restartableInitContainerLimits)
containerLimits = tmp
}
- maxResourceList(limits, container.Resources.Limits)
if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, podutil.InitContainers)
}
maxResourceList(initContainerLimits, containerLimits)
}
maxResourceList(limits, initContainerLimits)
// Add overhead to non-zero limits if requested:
if !opts.ExcludeOverhead && pod.Spec.Overhead != nil {
for name, quantity := range pod.Spec.Overhead {
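
The hunk is cut off here, but per the comment above, the loop it opens adds overhead only to resource names that already appear in limits, so overhead never introduces a limit for a resource the pod left unbounded. A tiny sketch of that pattern, using hypothetical plain-integer maps in place of v1.ResourceList:

package main

import "fmt"

func main() {
	// hypothetical stand-ins for v1.ResourceList quantities
	limits := map[string]int64{"memory": 512} // the pod set a memory limit but no cpu limit
	overhead := map[string]int64{"cpu": 250, "memory": 64}

	for name, quantity := range overhead {
		if value, ok := limits[name]; ok {
			limits[name] = value + quantity // only bump resources that already have a limit
		}
	}
	fmt.Println(limits) // map[memory:576]: no cpu limit was introduced
}
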

View File

@@ -576,6 +576,7 @@ func getPod(cname string, resources podResources) *v1.Pod {
}
func TestPodResourceRequests(t *testing.T) {
restartAlways := v1.ContainerRestartPolicyAlways
testCases := []struct {
description string
options PodResourcesOptions
@@ -794,23 +795,206 @@ func TestPodResourceRequests(t *testing.T) {
},
},
},
{
description: "restartable init container",
expectedRequests: v1.ResourceList{
// restartable init + regular container
v1.ResourceCPU: resource.MustParse("2"),
},
initContainers: []v1.Container{
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
},
{
description: "multiple restartable init containers",
expectedRequests: v1.ResourceList{
// max(5, restartable init containers(3+2+1) + regular(1)) = 7
v1.ResourceCPU: resource.MustParse("7"),
},
initContainers: []v1.Container{
{
Name: "init-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5"),
},
},
},
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
{
Name: "restartable-init-2",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
{
Name: "restartable-init-3",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
},
{
description: "multiple restartable and regular init containers",
expectedRequests: v1.ResourceList{
// init-2 requires 5 + the previously running restartable init
// containers(1+2) = 8, the restartable init container that starts
// after it doesn't count
v1.ResourceCPU: resource.MustParse("8"),
},
initContainers: []v1.Container{
{
Name: "init-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5"),
},
},
},
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
{
Name: "restartable-init-2",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
{
Name: "init-2",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5"),
},
},
},
{
Name: "restartable-init-3",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
},
{
description: "restartable-init, init and regular",
expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("210"),
},
initContainers: []v1.Container{
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
},
},
},
{
Name: "init-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
},
},
},
},
},
}
for _, tc := range testCases {
- p := &v1.Pod{
- Spec: v1.PodSpec{
- Containers: tc.containers,
- InitContainers: tc.initContainers,
- Overhead: tc.overhead,
- },
- Status: v1.PodStatus{
- ContainerStatuses: tc.containerStatus,
- Resize: tc.podResizeStatus,
- },
- }
- request := PodRequests(p, tc.options)
- if !resourcesEqual(tc.expectedRequests, request) {
- t.Errorf("[%s] expected requests = %v, got %v", tc.description, tc.expectedRequests, request)
- }
t.Run(tc.description, func(t *testing.T) {
p := &v1.Pod{
Spec: v1.PodSpec{
Containers: tc.containers,
InitContainers: tc.initContainers,
Overhead: tc.overhead,
},
Status: v1.PodStatus{
ContainerStatuses: tc.containerStatus,
Resize: tc.podResizeStatus,
},
}
request := PodRequests(p, tc.options)
if !resourcesEqual(tc.expectedRequests, request) {
t.Errorf("[%s] expected requests = %v, got %v", tc.description, tc.expectedRequests, request)
}
})
}
}
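
Outside the table, a one-off check against PodRequests stays short. A sketch, written as if it lived in this same test package so it can reuse resourcesEqual; the test name and fixture are made up:

// A hedged sketch of a standalone check against PodRequests, assuming the
// imports and helpers of the table test above are in scope.
func TestRestartableInitRequestsSketch(t *testing.T) {
	restartAlways := v1.ContainerRestartPolicyAlways
	p := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Name:          "restartable-init-1",
				RestartPolicy: &restartAlways,
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
				},
			}},
			Containers: []v1.Container{{
				Name: "container-1",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")},
				},
			}},
		},
	}
	// the sidecar runs for the life of the pod, so its request adds to the main container's
	want := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}
	if got := PodRequests(p, PodResourcesOptions{}); !resourcesEqual(want, got) {
		t.Errorf("expected requests = %v, got %v", want, got)
	}
}
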
@@ -848,6 +1032,7 @@ func TestPodResourceRequestsReuse(t *testing.T) {
}
func TestPodResourceLimits(t *testing.T) {
restartAlways := v1.ContainerRestartPolicyAlways
testCases := []struct {
description string
options PodResourcesOptions
@@ -1045,19 +1230,202 @@ func TestPodResourceLimits(t *testing.T) {
},
},
},
{
description: "restartable init container",
expectedLimits: v1.ResourceList{
// restartable init + regular container
v1.ResourceCPU: resource.MustParse("2"),
},
initContainers: []v1.Container{
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
},
{
description: "multiple restartable init containers",
expectedLimits: v1.ResourceList{
// max(5, restartable init containers(3+2+1) + regular(1)) = 7
v1.ResourceCPU: resource.MustParse("7"),
},
initContainers: []v1.Container{
{
Name: "init-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5"),
},
},
},
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
{
Name: "restartable-init-2",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
{
Name: "restartable-init-3",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
},
{
description: "multiple restartable and regular init containers",
expectedLimits: v1.ResourceList{
// init-2 requires 5 + the previously running restartable init
// containers(1+2) = 8, the restartable init container that starts
// after it doesn't count
v1.ResourceCPU: resource.MustParse("8"),
},
initContainers: []v1.Container{
{
Name: "init-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5"),
},
},
},
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
{
Name: "restartable-init-2",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
{
Name: "init-2",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("5"),
},
},
},
{
Name: "restartable-init-3",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1"),
},
},
},
},
},
{
description: "restartable-init, init and regular",
expectedLimits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("210"),
},
initContainers: []v1.Container{
{
Name: "restartable-init-1",
RestartPolicy: &restartAlways,
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10"),
},
},
},
{
Name: "init-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200"),
},
},
},
},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100"),
},
},
},
},
},
}
for _, tc := range testCases {
- p := &v1.Pod{
- Spec: v1.PodSpec{
- Containers: tc.containers,
- InitContainers: tc.initContainers,
- Overhead: tc.overhead,
- },
- }
- limits := PodLimits(p, tc.options)
- if !resourcesEqual(tc.expectedLimits, limits) {
- t.Errorf("[%s] expected limits = %v, got %v", tc.description, tc.expectedLimits, limits)
- }
t.Run(tc.description, func(t *testing.T) {
p := &v1.Pod{
Spec: v1.PodSpec{
Containers: tc.containers,
InitContainers: tc.initContainers,
Overhead: tc.overhead,
},
}
limits := PodLimits(p, tc.options)
if !resourcesEqual(tc.expectedLimits, limits) {
t.Errorf("[%s] expected limits = %v, got %v", tc.description, tc.expectedLimits, limits)
}
})
}
}

View File

@@ -153,7 +153,7 @@ func TestAddRemovePodsWithRestartableInitContainer(t *testing.T) {
},
{
desc: "pod with sidecar (sidecar containers feature enabled)",
probePaths: []probeKey{{"sidecar_pod", "sidecar", readiness}},
probePaths: []probeKey{{"restartable_init_container_pod", "restartable-init", readiness}},
enableSidecarContainers: true,
},
}

View File

@@ -34,11 +34,14 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/admission"
genericadmissioninitailizer "k8s.io/apiserver/pkg/admission/initializer"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/utils/lru"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)
const (
@@ -382,45 +385,6 @@ func limitRequestRatioConstraint(limitType string, resourceName string, enforced
return nil
}
- // sum takes the total of each named resource across all inputs
- // if a key is not in each input, then the output resource list will omit the key
- func sum(inputs []api.ResourceList) api.ResourceList {
- result := api.ResourceList{}
- keys := []api.ResourceName{}
- for i := range inputs {
- for k := range inputs[i] {
- keys = append(keys, k)
- }
- }
- for _, key := range keys {
- total, isSet := int64(0), true
- for i := range inputs {
- input := inputs[i]
- v, exists := input[key]
- if exists {
- if key == api.ResourceCPU {
- total = total + v.MilliValue()
- } else {
- total = total + v.Value()
- }
- } else {
- isSet = false
- }
- }
- if isSet {
- if key == api.ResourceCPU {
- result[key] = *(resource.NewMilliQuantity(total, resource.DecimalSI))
- } else {
- result[key] = *(resource.NewQuantity(total, resource.DecimalSI))
- }
- }
- }
- return result
- }
// DefaultLimitRangerActions is the default implementation of LimitRangerActions.
type DefaultLimitRangerActions struct{}
@@ -561,38 +525,11 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {
// enforce pod limits on init containers
if limitType == corev1.LimitTypePod {
// TODO: look into re-using resourcehelper.PodRequests/resourcehelper.PodLimits instead of duplicating
// that calculation
- containerRequests, containerLimits := []api.ResourceList{}, []api.ResourceList{}
- for j := range pod.Spec.Containers {
- container := &pod.Spec.Containers[j]
- containerRequests = append(containerRequests, container.Resources.Requests)
- containerLimits = append(containerLimits, container.Resources.Limits)
- }
- podRequests := sum(containerRequests)
- podLimits := sum(containerLimits)
- for j := range pod.Spec.InitContainers {
- container := &pod.Spec.InitContainers[j]
- // take max(sum_containers, any_init_container)
- for k, v := range container.Resources.Requests {
- if v2, ok := podRequests[k]; ok {
- if v.Cmp(v2) > 0 {
- podRequests[k] = v
- }
- } else {
- podRequests[k] = v
- }
- }
- for k, v := range container.Resources.Limits {
- if v2, ok := podLimits[k]; ok {
- if v.Cmp(v2) > 0 {
- podLimits[k] = v
- }
- } else {
- podLimits[k] = v
- }
- }
- }
opts := podResourcesOptions{
InPlacePodVerticalScalingEnabled: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
podRequests := podRequests(pod, opts)
podLimits := podLimits(pod, opts)
for k, v := range limit.Min {
if err := minConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil {
errs = append(errs, err)
@@ -612,3 +549,146 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {
}
return utilerrors.NewAggregate(errs)
}
type podResourcesOptions struct {
// InPlacePodVerticalScalingEnabled indicates that the in-place pod vertical scaling feature gate is enabled.
InPlacePodVerticalScalingEnabled bool
}
// podRequests is a simplified version of pkg/api/v1/resource/PodRequests that operates against the core version of
// pod. Any changes to that calculation should be reflected here.
// TODO: Maybe we can consider doing a partial conversion of the pod to a v1
// type and then using the pkg/api/v1/resource/PodRequests.
func podRequests(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
reqs := api.ResourceList{}
var containerStatuses map[string]*api.ContainerStatus
if opts.InPlacePodVerticalScalingEnabled {
containerStatuses = map[string]*api.ContainerStatus{}
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
}
for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
if opts.InPlacePodVerticalScalingEnabled {
cs, found := containerStatuses[container.Name]
if found {
if pod.Status.Resize == api.PodResizeStatusInfeasible {
containerReqs = cs.AllocatedResources
} else {
containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
}
}
}
addResourceList(reqs, containerReqs)
}
restartableInitContainerReqs := api.ResourceList{}
initContainerReqs := api.ResourceList{}
// init containers define the minimum of any resource
// Note: In-place resize is not allowed for InitContainers, so no need to check for ResizeStatus value
for _, container := range pod.Spec.InitContainers {
containerReqs := container.Resources.Requests
if container.RestartPolicy != nil && *container.RestartPolicy == api.ContainerRestartPolicyAlways {
// restartable init containers keep running alongside the regular containers, so add them to the cumulative container requests
addResourceList(reqs, containerReqs)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerReqs, containerReqs)
containerReqs = restartableInitContainerReqs
} else {
tmp := api.ResourceList{}
addResourceList(tmp, containerReqs)
addResourceList(tmp, restartableInitContainerReqs)
containerReqs = tmp
}
maxResourceList(initContainerReqs, containerReqs)
}
maxResourceList(reqs, initContainerReqs)
return reqs
}
// podLimits is a simplified version of pkg/api/v1/resource/PodLimits that operates against the core version of
// pod. Any changes to that calculation should be reflected here.
// TODO: Maybe we can consider doing a partial conversion of the pod to a v1
// type and then using the pkg/api/v1/resource/PodLimits.
func podLimits(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
limits := api.ResourceList{}
for _, container := range pod.Spec.Containers {
addResourceList(limits, container.Resources.Limits)
}
restartableInitContainerLimits := api.ResourceList{}
initContainerLimits := api.ResourceList{}
// init containers define the minimum of any resource
for _, container := range pod.Spec.InitContainers {
containerLimits := container.Resources.Limits
// Is the init container marked as a restartable init container?
if container.RestartPolicy != nil && *container.RestartPolicy == api.ContainerRestartPolicyAlways {
addResourceList(limits, containerLimits)
// track our cumulative restartable init container resources
addResourceList(restartableInitContainerLimits, containerLimits)
containerLimits = restartableInitContainerLimits
} else {
tmp := api.ResourceList{}
addResourceList(tmp, containerLimits)
addResourceList(tmp, restartableInitContainerLimits)
containerLimits = tmp
}
maxResourceList(initContainerLimits, containerLimits)
}
maxResourceList(limits, initContainerLimits)
return limits
}
// addResourceList adds the resources in newList to list.
func addResourceList(list, newList api.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok {
list[name] = quantity.DeepCopy()
} else {
value.Add(quantity)
list[name] = value
}
}
}
// maxResourceList sets list to the greater of list/newList for every resource in newList
func maxResourceList(list, newList api.ResourceList) {
for name, quantity := range newList {
if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 {
list[name] = quantity.DeepCopy()
}
}
}
// max returns the result of max(a, b) for each named resource and is only used if we can't
// accumulate into an existing resource list
func max(a api.ResourceList, b api.ResourceList) api.ResourceList {
result := api.ResourceList{}
for key, value := range a {
if other, found := b[key]; found {
if value.Cmp(other) <= 0 {
result[key] = other.DeepCopy()
continue
}
}
result[key] = value.DeepCopy()
}
for key, value := range b {
if _, found := result[key]; !found {
result[key] = value.DeepCopy()
}
}
return result
}
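
The three helpers differ in merge semantics: addResourceList sums per resource name, maxResourceList keeps the larger quantity in place, and max (above) builds a fresh list covering keys from either argument. A standalone sketch demonstrating the first two; since the originals are unexported, trimmed copies are re-declared here under a stand-in ResourceList type:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// ResourceList is a stand-in for the admission plugin's api.ResourceList.
type ResourceList map[string]resource.Quantity

// addResourceList sums per resource name (trimmed copy of the helper above).
func addResourceList(list, newList ResourceList) {
	for name, quantity := range newList {
		if value, ok := list[name]; !ok {
			list[name] = quantity.DeepCopy()
		} else {
			value.Add(quantity)
			list[name] = value
		}
	}
}

// maxResourceList keeps the larger quantity per resource name (trimmed copy).
func maxResourceList(list, newList ResourceList) {
	for name, quantity := range newList {
		if value, ok := list[name]; !ok || quantity.Cmp(value) > 0 {
			list[name] = quantity.DeepCopy()
		}
	}
}

func main() {
	a := ResourceList{"cpu": resource.MustParse("1"), "memory": resource.MustParse("1Gi")}
	b := ResourceList{"cpu": resource.MustParse("2")}

	sum := ResourceList{}
	addResourceList(sum, a)
	addResourceList(sum, b)
	cpuSum := sum["cpu"]
	fmt.Println(cpuSum.String()) // 3: quantities accumulate

	peak := ResourceList{}
	maxResourceList(peak, a)
	maxResourceList(peak, b)
	cpuPeak, memPeak := peak["cpu"], peak["memory"]
	fmt.Println(cpuPeak.String(), memPeak.String()) // 2 1Gi: per-key maximum, keys merge
}
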

View File

@@ -38,6 +38,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
api "k8s.io/kubernetes/pkg/apis/core"
v1 "k8s.io/kubernetes/pkg/apis/core/v1"
)
@@ -626,6 +627,11 @@ func TestPodLimitFunc(t *testing.T) {
pod: validPod("pod-max-local-ephemeral-storage-ratio", 3, getResourceRequirements(getLocalStorageResourceList("250Mi"), getLocalStorageResourceList("500Mi"))),
limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getLocalStorageResourceList("2Gi"), api.ResourceList{}, api.ResourceList{}, getLocalStorageResourceList("1.5")),
},
{
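// restartable init limit (1500m) + container limit (1500m) = 3 CPU, over the 2 CPU pod max below, so this pod must fail validation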
pod: withRestartableInitContainer(getComputeResourceList("1500m", ""), api.ResourceList{},
validPod("ctr-max-cpu-limit-restartable-init-container", 1, getResourceRequirements(getComputeResourceList("1000m", ""), getComputeResourceList("1500m", "")))),
limitRange: createLimitRange(api.LimitTypePod, api.ResourceList{}, getComputeResourceList("2", ""), api.ResourceList{}, api.ResourceList{}, api.ResourceList{}),
},
}
for i := range errorCases {
test := errorCases[i]
@@ -640,6 +646,18 @@ func TestPodLimitFunc(t *testing.T) {
}
}
func withRestartableInitContainer(requests, limits api.ResourceList, pod api.Pod) api.Pod {
policyAlways := api.ContainerRestartPolicyAlways
pod.Spec.InitContainers = append(pod.Spec.InitContainers,
api.Container{
RestartPolicy: &policyAlways,
Image: "foo:V" + strconv.Itoa(len(pod.Spec.InitContainers)),
Resources: getResourceRequirements(requests, limits),
Name: "foo-" + strconv.Itoa(len(pod.Spec.InitContainers)),
})
return pod
}
func getLocalStorageResourceList(ephemeralStorage string) api.ResourceList {
res := api.ResourceList{}
if ephemeralStorage != "" {