Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #133285 from yue9944882/automated-cherry-pick-of-#132895-upstream-release-1.33
Automated cherry pick of #132895: Fixes scheduler nil panic due to empty init container request&limit
@@ -404,7 +404,12 @@ func maxResourceList(list, newList v1.ResourceList) {
 // max returns the result of max(a, b...) for each named resource and is only used if we can't
 // accumulate into an existing resource list
 func max(a v1.ResourceList, b ...v1.ResourceList) v1.ResourceList {
-	result := a.DeepCopy()
+	var result v1.ResourceList
+	if a != nil {
+		result = a.DeepCopy()
+	} else {
+		result = v1.ResourceList{}
+	}
 	for _, other := range b {
 		maxResourceList(result, other)
 	}
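For context on the crash this cherry pick fixes: DeepCopy of a nil v1.ResourceList returns nil, and writing the per-resource maximum into that nil map panics at runtime with "assignment to entry in nil map". The sketch below is illustrative only; maxList stands in for the unexported upstream helper, and the guard at the end mirrors the change above.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// maxList is an illustrative stand-in for the unexported upstream helper:
// for each resource name it keeps the larger of the value already in dst
// and the value in other. Assigning into dst panics if dst is a nil map.
func maxList(dst, other v1.ResourceList) {
	for name, quantity := range other {
		current, ok := dst[name]
		if !ok || quantity.Cmp(current) > 0 {
			dst[name] = quantity.DeepCopy()
		}
	}
}

func main() {
	// An init container with no requests or limits yields a nil ResourceList.
	var fromEmptySpec v1.ResourceList
	fromStatus := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}

	result := fromEmptySpec.DeepCopy() // DeepCopy of a nil list is still nil
	// maxList(result, fromStatus)     // old code path: panic, assignment to entry in nil map

	if result == nil { // the nil guard this cherry pick adds
		result = v1.ResourceList{}
	}
	maxList(result, fromStatus)

	cpu := result[v1.ResourceCPU]
	fmt.Println(cpu.String()) // prints "2"
}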
@@ -23,6 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/utils/ptr"
 )

 func TestPodRequestsAndLimits(t *testing.T) {
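On the added import: k8s.io/utils/ptr is needed only because the new test case sets RestartPolicy, a *v1.ContainerRestartPolicy field, and ptr.To returns a typed pointer inline instead of requiring a named variable. A minimal illustration (the container name here is made up for the example):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// ptr.To[T](v) returns &v, so pointer-typed API fields can be filled inline.
	sidecar := v1.Container{
		Name:          "init-proxy", // hypothetical name, illustration only
		RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways),
	}
	fmt.Println(*sidecar.RestartPolicy) // Always
}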
@@ -1967,11 +1968,14 @@ func TestIsSupportedPodLevelResource(t *testing.T) {
 func TestAggregateContainerRequestsAndLimits(t *testing.T) {
 	restartAlways := v1.ContainerRestartPolicyAlways
 	cases := []struct {
-		containers       []v1.Container
-		initContainers   []v1.Container
-		name             string
-		expectedRequests v1.ResourceList
-		expectedLimits   v1.ResourceList
+		options               PodResourcesOptions
+		containers            []v1.Container
+		containerStatuses     []v1.ContainerStatus
+		initContainers        []v1.Container
+		initContainerStatuses []v1.ContainerStatus
+		name                  string
+		expectedRequests      v1.ResourceList
+		expectedLimits        v1.ResourceList
 	}{
 		{
 			name: "one container with limits",
@@ -2135,20 +2139,74 @@ func TestAggregateContainerRequestsAndLimits(t *testing.T) {
 				v1.ResourceName(v1.ResourceCPU): resource.MustParse("17"),
 			},
 		},
+		{
+			name:    "regular containers with empty requests, but status with non-empty requests",
+			options: PodResourcesOptions{UseStatusResources: true},
+			containers: []v1.Container{
+				{
+					Name:      "container-1",
+					Resources: v1.ResourceRequirements{},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				{
+					Name: "container-1",
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
+					},
+				},
+			},
+			expectedRequests: v1.ResourceList{
+				v1.ResourceCPU: resource.MustParse("2"),
+			},
+			expectedLimits: v1.ResourceList{},
+		},
+		{
+			name:    "always-restart init containers with empty requests, but status with non-empty requests",
+			options: PodResourcesOptions{UseStatusResources: true},
+			initContainers: []v1.Container{
+				{
+					Name:          "container-1",
+					RestartPolicy: ptr.To[v1.ContainerRestartPolicy](v1.ContainerRestartPolicyAlways),
+					Resources:     v1.ResourceRequirements{},
+				},
+			},
+			initContainerStatuses: []v1.ContainerStatus{
+				{
+					Name: "container-1",
+					Resources: &v1.ResourceRequirements{
+						Requests: v1.ResourceList{
+							v1.ResourceCPU: resource.MustParse("2"),
+						},
+					},
+				},
+			},
+			expectedRequests: v1.ResourceList{
+				v1.ResourceCPU: resource.MustParse("2"),
+			},
+			expectedLimits: v1.ResourceList{},
+		},
 	}

 	for idx, tc := range cases {
-		testPod := &v1.Pod{Spec: v1.PodSpec{Containers: tc.containers, InitContainers: tc.initContainers}}
-		resRequests := AggregateContainerRequests(testPod, PodResourcesOptions{})
-		resLimits := AggregateContainerLimits(testPod, PodResourcesOptions{})
 		t.Run(tc.name, func(t *testing.T) {
+			testPod := &v1.Pod{
+				Spec:   v1.PodSpec{Containers: tc.containers, InitContainers: tc.initContainers},
+				Status: v1.PodStatus{ContainerStatuses: tc.containerStatuses, InitContainerStatuses: tc.initContainerStatuses},
+			}
+			resRequests := AggregateContainerRequests(testPod, tc.options)
+			resLimits := AggregateContainerLimits(testPod, tc.options)
+
 			if !equality.Semantic.DeepEqual(tc.expectedRequests, resRequests) {
 				t.Errorf("test case failure[%d]: %v, requests:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedRequests, resRequests)
 			}

 			if !equality.Semantic.DeepEqual(tc.expectedLimits, resLimits) {
 				t.Errorf("test case failure[%d]: %v, limits:\n expected:\t%v\ngot\t\t%v", idx, tc.name, tc.expectedLimits, resLimits)
 			}
 		})
 	}
 }
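Connecting the new test cases back to the one-line fix: with UseStatusResources set, a container whose spec declares no resources contributes a nil ResourceList on the spec side while its status reports actual requests (the in-place resize case), and that kind of nil argument is what the old max() copied and then wrote into. The caller-side sketch below reproduces the init-container case from the table above; the import path k8s.io/component-helpers/resource is an assumption, since the changed file's path did not survive this extract, but the call shape follows the test loop.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	resourcehelpers "k8s.io/component-helpers/resource" // assumed package path, see note above
	"k8s.io/utils/ptr"
)

func main() {
	// Mirrors the "always-restart init containers with empty requests" case:
	// the spec declares nothing, the status reports cpu=2.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Name:          "container-1",
				RestartPolicy: ptr.To(v1.ContainerRestartPolicyAlways),
			}},
		},
		Status: v1.PodStatus{
			InitContainerStatuses: []v1.ContainerStatus{{
				Name: "container-1",
				Resources: &v1.ResourceRequirements{
					Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")},
				},
			}},
		},
	}

	// Before the guard in max(), aggregating a pod like this could hit the
	// nil-map panic; with the fix it returns the status-derived requests.
	reqs := resourcehelpers.AggregateContainerRequests(pod, resourcehelpers.PodResourcesOptions{UseStatusResources: true})
	cpu := reqs[v1.ResourceCPU]
	fmt.Println(cpu.String()) // "2"
}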