Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 14:07:14 +00:00)

Merge pull request #128377 from tallclair/allocated-status-2
[FG:InPlacePodVerticalScaling] Implement AllocatedResources status changes for Beta

Commit f81a68f488
@@ -831,18 +831,29 @@ func dropDisabledPodStatusFields(podStatus, oldPodStatus *api.PodStatus, podSpec
 	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
-		// Drop Resize, AllocatedResources, and Resources fields
-		dropResourcesFields := func(csl []api.ContainerStatus) {
+		// Drop Resize and Resources fields
+		dropResourcesField := func(csl []api.ContainerStatus) {
 			for i := range csl {
-				csl[i].AllocatedResources = nil
 				csl[i].Resources = nil
 			}
 		}
-		dropResourcesFields(podStatus.ContainerStatuses)
-		dropResourcesFields(podStatus.InitContainerStatuses)
-		dropResourcesFields(podStatus.EphemeralContainerStatuses)
+		dropResourcesField(podStatus.ContainerStatuses)
+		dropResourcesField(podStatus.InitContainerStatuses)
+		dropResourcesField(podStatus.EphemeralContainerStatuses)
 		podStatus.Resize = ""
 	}
+	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) ||
+		!utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+		// Drop AllocatedResources field
+		dropAllocatedResourcesField := func(csl []api.ContainerStatus) {
+			for i := range csl {
+				csl[i].AllocatedResources = nil
+			}
+		}
+		dropAllocatedResourcesField(podStatus.ContainerStatuses)
+		dropAllocatedResourcesField(podStatus.InitContainerStatuses)
+		dropAllocatedResourcesField(podStatus.EphemeralContainerStatuses)
+	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) && !dynamicResourceAllocationInUse(oldPodSpec) {
 		podStatus.ResourceClaimStatuses = nil
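
Net effect of this hunk: status.resources and status.resize are still cleared only when InPlacePodVerticalScaling is disabled (and not already in use), while status.allocatedResources now additionally requires the new InPlacePodVerticalScalingAllocatedStatus gate. A minimal sketch of that decision, not code from this PR (the helper name is hypothetical):

	// keepAllocatedResources reports whether dropDisabledPodStatusFields would
	// leave status.allocatedResources in place, given the two gate values.
	func keepAllocatedResources(ippvsEnabled, allocatedStatusEnabled bool) bool {
		// AllocatedResources survives only when both gates are enabled;
		// Resources and Resize depend on InPlacePodVerticalScaling alone.
		return ippvsEnabled && allocatedStatusEnabled
	}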
@@ -1289,26 +1300,17 @@ func MarkPodProposedForResize(oldPod, newPod *api.Pod) {
 	}
 
 	for i, c := range newPod.Spec.Containers {
 		if c.Name != oldPod.Spec.Containers[i].Name {
 			return // Update is invalid (container mismatch): let validation handle it.
 		}
 		if c.Resources.Requests == nil {
 			continue
 		}
 		if cmp.Equal(oldPod.Spec.Containers[i].Resources, c.Resources) {
 			continue
 		}
-		findContainerStatus := func(css []api.ContainerStatus, cName string) (api.ContainerStatus, bool) {
-			for i := range css {
-				if css[i].Name == cName {
-					return css[i], true
-				}
-			}
-			return api.ContainerStatus{}, false
-		}
-		if cs, ok := findContainerStatus(newPod.Status.ContainerStatuses, c.Name); ok {
-			if !cmp.Equal(c.Resources.Requests, cs.AllocatedResources) {
-				newPod.Status.Resize = api.PodResizeStatusProposed
-				break
-			}
-		}
+		newPod.Status.Resize = api.PodResizeStatusProposed
+		return
 	}
 }
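
With the container-status lookup removed, MarkPodProposedForResize now flags a resize purely from the spec-to-spec difference. A rough usage sketch, assuming a hypothetical test helper makePod that returns an *api.Pod whose single container requests the given CPU quantity (the helper is not part of this diff):

	oldPod := makePod("100m") // hypothetical helper
	newPod := makePod("200m")
	MarkPodProposedForResize(oldPod, newPod)
	// The CPU request changed, so the update is marked as a proposed resize:
	// newPod.Status.Resize == api.PodResizeStatusProposed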
@ -2638,56 +2638,69 @@ func TestDropInPlacePodVerticalScaling(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
for _, enabled := range []bool{true, false} {
|
||||
for _, oldPodInfo := range podInfo {
|
||||
for _, newPodInfo := range podInfo {
|
||||
oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
|
||||
newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
|
||||
if newPod == nil {
|
||||
continue
|
||||
}
|
||||
for _, ippvsEnabled := range []bool{true, false} {
|
||||
t.Run(fmt.Sprintf("InPlacePodVerticalScaling=%t", ippvsEnabled), func(t *testing.T) {
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, ippvsEnabled)
|
||||
|
||||
t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, enabled)
|
||||
for _, allocatedStatusEnabled := range []bool{true, false} {
|
||||
t.Run(fmt.Sprintf("AllocatedStatus=%t", allocatedStatusEnabled), func(t *testing.T) {
|
||||
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, allocatedStatusEnabled)
|
||||
|
||||
var oldPodSpec *api.PodSpec
|
||||
var oldPodStatus *api.PodStatus
|
||||
if oldPod != nil {
|
||||
oldPodSpec = &oldPod.Spec
|
||||
oldPodStatus = &oldPod.Status
|
||||
}
|
||||
dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
|
||||
dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
|
||||
for _, oldPodInfo := range podInfo {
|
||||
for _, newPodInfo := range podInfo {
|
||||
oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
|
||||
newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
|
||||
if newPod == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// old pod should never be changed
|
||||
if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
|
||||
t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
|
||||
}
|
||||
t.Run(fmt.Sprintf("old pod %v, new pod %v", oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
|
||||
var oldPodSpec *api.PodSpec
|
||||
var oldPodStatus *api.PodStatus
|
||||
if oldPod != nil {
|
||||
oldPodSpec = &oldPod.Spec
|
||||
oldPodStatus = &oldPod.Status
|
||||
}
|
||||
dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
|
||||
dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
|
||||
|
||||
switch {
|
||||
case enabled || oldPodHasInPlaceVerticalScaling:
|
||||
// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
|
||||
if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
|
||||
t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
|
||||
}
|
||||
case newPodHasInPlaceVerticalScaling:
|
||||
// new pod should be changed
|
||||
if reflect.DeepEqual(newPod, newPodInfo.pod()) {
|
||||
t.Errorf("new pod was not changed")
|
||||
}
|
||||
// new pod should not have ResizePolicy
|
||||
if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
|
||||
t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
|
||||
}
|
||||
default:
|
||||
// new pod should not need to be changed
|
||||
if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
|
||||
t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
|
||||
// old pod should never be changed
|
||||
if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
|
||||
t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
|
||||
}
|
||||
|
||||
switch {
|
||||
case ippvsEnabled || oldPodHasInPlaceVerticalScaling:
|
||||
// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
|
||||
expected := newPodInfo.pod()
|
||||
if !ippvsEnabled || !allocatedStatusEnabled {
|
||||
expected.Status.ContainerStatuses[0].AllocatedResources = nil
|
||||
}
|
||||
if !reflect.DeepEqual(newPod, expected) {
|
||||
t.Errorf("new pod changed: %v", cmp.Diff(newPod, expected))
|
||||
}
|
||||
case newPodHasInPlaceVerticalScaling:
|
||||
// new pod should be changed
|
||||
if reflect.DeepEqual(newPod, newPodInfo.pod()) {
|
||||
t.Errorf("new pod was not changed")
|
||||
}
|
||||
// new pod should not have ResizePolicy
|
||||
if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
|
||||
t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
|
||||
}
|
||||
default:
|
||||
// new pod should not need to be changed
|
||||
if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
|
||||
t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -2796,439 +2809,277 @@ func TestDropSidecarContainers(t *testing.T) {
|
||||
|
||||
func TestMarkPodProposedForResize(t *testing.T) {
|
||||
testCases := []struct {
|
||||
desc string
|
||||
newPod *api.Pod
|
||||
oldPod *api.Pod
|
||||
expectedPod *api.Pod
|
||||
desc string
|
||||
newPodSpec api.PodSpec
|
||||
oldPodSpec api.PodSpec
|
||||
expectProposedResize bool
|
||||
}{
|
||||
{
|
||||
desc: "nil requests",
|
||||
newPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
newPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
oldPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
expectProposedResize: false,
|
||||
},
|
||||
{
|
||||
desc: "resources unchanged",
|
||||
newPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
newPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
oldPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectProposedResize: false,
|
||||
},
|
||||
{
|
||||
desc: "resize desired",
|
||||
newPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
desc: "requests resized",
|
||||
newPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
},
|
||||
oldPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
expectProposedResize: true,
|
||||
},
|
||||
{
|
||||
desc: "limits resized",
|
||||
newPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Resize: api.PodResizeStatusProposed,
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("500m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectProposedResize: true,
|
||||
},
|
||||
{
|
||||
desc: "the number of containers in the pod has increased; no action should be taken.",
|
||||
newPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
newPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
oldPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectProposedResize: false,
|
||||
},
|
||||
{
|
||||
desc: "the number of containers in the pod has decreased; no action should be taken.",
|
||||
newPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
newPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
},
|
||||
oldPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectedPod: &api.Pod{
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
expectProposedResize: false,
|
||||
},
|
||||
{
|
||||
desc: "containers reordered",
|
||||
newPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
ContainerStatuses: []api.ContainerStatus{
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
AllocatedResources: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
oldPodSpec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: "c2",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("100m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("200m")},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "c1",
|
||||
Image: "image",
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{api.ResourceCPU: resource.MustParse("300m")},
|
||||
Limits: api.ResourceList{api.ResourceCPU: resource.MustParse("400m")},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expectProposedResize: false,
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
MarkPodProposedForResize(tc.oldPod, tc.newPod)
|
||||
if diff := cmp.Diff(tc.expectedPod, tc.newPod); diff != "" {
|
||||
t.Errorf("unexpected pod spec (-want, +got):\n%s", diff)
|
||||
newPod := &api.Pod{Spec: tc.newPodSpec}
|
||||
newPodUnchanged := newPod.DeepCopy()
|
||||
oldPod := &api.Pod{Spec: tc.oldPodSpec}
|
||||
MarkPodProposedForResize(oldPod, newPod)
|
||||
if tc.expectProposedResize {
|
||||
assert.Equal(t, api.PodResizeStatusProposed, newPod.Status.Resize)
|
||||
} else {
|
||||
assert.Equal(t, api.PodResizeStatus(""), newPod.Status.Resize)
|
||||
}
|
||||
newPod.Status.Resize = newPodUnchanged.Status.Resize // Only field that might have changed.
|
||||
assert.Equal(t, newPodUnchanged, newPod, "No fields other than .status.resize should be modified")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@@ -257,6 +257,19 @@ const (
 	// deletion ordering.
 	HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
 
+	// owner: @vinaykul,@tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables In-Place Pod Vertical Scaling
+	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
+
+	// owner: @tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables the AllocatedResources field in container status. This feature requires
+	// InPlacePodVerticalScaling also be enabled.
+	InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus"
+
 	// owner: @trierra
 	//
 	// Disables the Portworx in-tree driver.
@@ -741,12 +754,6 @@ const (
 	// Initial implementation focused on ReadWriteOncePod volumes.
 	SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod"
 
-	// owner: @vinaykul
-	// kep: http://kep.k8s.io/1287
-	//
-	// Enables In-Place Pod Vertical Scaling
-	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
-
 	// owner: @Sh4d1,@RyanAoh,@rikatz
 	// kep: http://kep.k8s.io/1860
 	// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
@@ -384,6 +384,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
 	},
 
+	InPlacePodVerticalScalingAllocatedStatus: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
 	InTreePluginPortworxUnregister: {
 		{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
 	},
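
The new gate is introduced as alpha and off by default, so allocated-status reporting has to be opted into explicitly. A sketch-only helper (not from this PR) for checking the pair together, mirroring the gate checks used throughout this diff:

	import (
		utilfeature "k8s.io/apiserver/pkg/util/feature"
		"k8s.io/kubernetes/pkg/features"
	)

	// allocatedStatusReportingEnabled: AllocatedResources is only surfaced when
	// both gates are on.
	func allocatedStatusReportingEnabled() bool {
		return utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
			utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus)
	}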
@@ -117,18 +117,17 @@ func HugePageLimits(resourceList v1.ResourceList) map[int64]int64 {
 }
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
-func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
-	inPlacePodVerticalScalingEnabled := utilfeature.DefaultFeatureGate.Enabled(kubefeatures.InPlacePodVerticalScaling)
-	// sum requests and limits.
-	reqs := resource.PodRequests(pod, resource.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled,
+func ResourceConfigForPod(allocatedPod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64, enforceMemoryQoS bool) *ResourceConfig {
+	reqs := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
+		// pod is already configured to the allocated resources, and we explicitly don't want to use
+		// the actual resources if we're instantiating a resize.
+		UseStatusResources: false,
 	})
 	// track if limits were applied for each resource.
 	memoryLimitsDeclared := true
 	cpuLimitsDeclared := true
 
-	limits := resource.PodLimits(pod, resource.PodResourcesOptions{
-		InPlacePodVerticalScalingEnabled: inPlacePodVerticalScalingEnabled,
+	limits := resource.PodLimits(allocatedPod, resource.PodResourcesOptions{
 		ContainerFn: func(res v1.ResourceList, containerType resource.ContainerType) {
 			if res.Cpu().IsZero() {
 				cpuLimitsDeclared = false
@@ -164,7 +163,7 @@ func ResourceConfigForPod(pod *v1.Pod, enforceCPULimits bool, cpuPeriod uint64,
 	}
 
 	// determine the qos class
-	qosClass := v1qos.GetPodQOS(pod)
+	qosClass := v1qos.GetPodQOS(allocatedPod)
 
 	// build the result
 	result := &ResourceConfig{}
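
ResourceConfigForPod now always sizes the pod cgroup from the allocated (spec) values instead of gate-dependent logic. A hedged sketch of the contrast using the UseStatusResources option that appears in this hunk; the semantics of the true case are inferred from the comment above, not exercised by this diff:

	// Allocated view: sum the spec resources, ignoring what the runtime reports.
	allocated := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
		UseStatusResources: false,
	})
	// Actual view: presumably falls back to the resources reported in container
	// status for running containers (inferred, not shown in this hunk).
	actual := resource.PodRequests(allocatedPod, resource.PodResourcesOptions{
		UseStatusResources: true,
	})
	_, _ = allocated, actual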
@@ -114,6 +114,7 @@ import (
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/userns"
 	"k8s.io/kubernetes/pkg/kubelet/util"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/kubelet/util/manager"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
@@ -2795,6 +2796,14 @@ func (kl *Kubelet) canResizePod(pod *v1.Pod) (bool, v1.PodResizeStatus) {
 func (kl *Kubelet) handlePodResourcesResize(pod *v1.Pod) (*v1.Pod, error) {
 	allocatedPod, updated := kl.statusManager.UpdatePodFromAllocation(pod)
 	if !updated {
+		// Unless a resize is in-progress, clear the resize status.
+		resizeStatus, _ := kl.statusManager.GetPodResizeStatus(string(pod.UID))
+		if resizeStatus != v1.PodResizeStatusInProgress {
+			if err := kl.statusManager.SetPodResizeStatus(pod.UID, ""); err != nil {
+				klog.ErrorS(err, "Failed to clear resize status", "pod", format.Pod(pod))
+			}
+		}
+
 		// Pod is not resizing, nothing more to do here.
 		return allocatedPod, nil
 	}
@@ -33,7 +33,6 @@ import (
 	"strconv"
 	"strings"
 
-	"github.com/google/go-cmp/cmp"
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -61,6 +60,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/status"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	utilfs "k8s.io/kubernetes/pkg/util/filesystem"
 	utilkernel "k8s.io/kubernetes/pkg/util/kernel"
 	utilpod "k8s.io/kubernetes/pkg/util/pod"
@@ -1743,46 +1743,79 @@ func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal bool) v1.Pod
 		}
 	}
 
-func deleteCustomResourceFromResourceRequirements(target *v1.ResourceRequirements) {
-	for resource := range target.Limits {
-		if resource != v1.ResourceCPU && resource != v1.ResourceMemory && resource != v1.ResourceEphemeralStorage {
-			delete(target.Limits, resource)
-		}
-	}
-	for resource := range target.Requests {
-		if resource != v1.ResourceCPU && resource != v1.ResourceMemory && resource != v1.ResourceEphemeralStorage {
-			delete(target.Requests, resource)
-		}
-	}
-}
-
-func (kl *Kubelet) determinePodResizeStatus(pod *v1.Pod, podStatus *v1.PodStatus) v1.PodResizeStatus {
-	var podResizeStatus v1.PodResizeStatus
-	specStatusDiffer := false
-	for _, c := range pod.Spec.Containers {
-		if cs, ok := podutil.GetContainerStatus(podStatus.ContainerStatuses, c.Name); ok {
-			cResourceCopy := c.Resources.DeepCopy()
-			// for both requests and limits, we only compare the cpu, memory and ephemeralstorage
-			// which are included in convertToAPIContainerStatuses
-			deleteCustomResourceFromResourceRequirements(cResourceCopy)
-			csResourceCopy := cs.Resources.DeepCopy()
-			if csResourceCopy != nil && !cmp.Equal(*cResourceCopy, *csResourceCopy) {
-				specStatusDiffer = true
-				break
-			}
-		}
-	}
-	if !specStatusDiffer {
-		// Clear last resize state from checkpoint
-		if err := kl.statusManager.SetPodResizeStatus(pod.UID, ""); err != nil {
-			klog.ErrorS(err, "SetPodResizeStatus failed", "pod", pod.Name)
-		}
-	} else {
-		if resizeStatus, found := kl.statusManager.GetPodResizeStatus(string(pod.UID)); found {
-			podResizeStatus = resizeStatus
-		}
-	}
-	return podResizeStatus
-}
+func (kl *Kubelet) determinePodResizeStatus(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus, podIsTerminal bool) v1.PodResizeStatus {
+	if kubetypes.IsStaticPod(allocatedPod) {
+		return ""
+	}
+
+	// If pod is terminal, clear the resize status.
+	if podIsTerminal {
+		if err := kl.statusManager.SetPodResizeStatus(allocatedPod.UID, ""); err != nil {
+			klog.ErrorS(err, "SetPodResizeStatus failed for terminal pod", "pod", format.Pod(allocatedPod))
+		}
+		return ""
+	}
+
+	resizeStatus, _ := kl.statusManager.GetPodResizeStatus(string(allocatedPod.UID))
+	// If the resize was in-progress and the actual resources match the allocated resources, mark
+	// the resize as complete by clearing the resize status.
+	if resizeStatus == v1.PodResizeStatusInProgress &&
+		allocatedResourcesMatchStatus(allocatedPod, podStatus) {
+		if err := kl.statusManager.SetPodResizeStatus(allocatedPod.UID, ""); err != nil {
+			klog.ErrorS(err, "SetPodResizeStatus failed", "pod", format.Pod(allocatedPod))
+		}
+		return ""
+	}
+	return resizeStatus
+}
+
+// allocatedResourcesMatchStatus tests whether the resizeable resources in the pod spec match the
+// resources reported in the status.
+func allocatedResourcesMatchStatus(allocatedPod *v1.Pod, podStatus *kubecontainer.PodStatus) bool {
+	for _, c := range allocatedPod.Spec.Containers {
+		if cs := podStatus.FindContainerStatusByName(c.Name); cs != nil {
+			if cs.State != kubecontainer.ContainerStateRunning {
+				// If the container isn't running, it isn't resizing.
+				continue
+			}
+
+			cpuReq, hasCPUReq := c.Resources.Requests[v1.ResourceCPU]
+			cpuLim, hasCPULim := c.Resources.Limits[v1.ResourceCPU]
+			memLim, hasMemLim := c.Resources.Limits[v1.ResourceMemory]
+
+			if cs.Resources == nil {
+				if hasCPUReq || hasCPULim || hasMemLim {
+					// Container status is missing Resources information, but the container does
+					// have resizable resources configured.
+					klog.ErrorS(nil, "Missing runtime resources information for resizing container",
+						"pod", format.Pod(allocatedPod), "container", c.Name)
+					return false // We don't want to clear resize status with insufficient information.
+				} else {
+					// No resizable resources configured; this might be ok.
+					continue
+				}
+			}
+
+			// Only compare resizeable resources, and only compare resources that are explicitly configured.
+			if hasCPUReq {
+				if !cpuReq.Equal(*cs.Resources.CPURequest) {
+					return false
+				}
+			}
+			if hasCPULim {
+				if !cpuLim.Equal(*cs.Resources.CPULimit) {
+					return false
+				}
+			}
+			if hasMemLim {
+				if !memLim.Equal(*cs.Resources.MemoryLimit) {
+					return false
+				}
+			}
+		}
+	}
+
+	return true
+}
 
 // generateAPIPodStatus creates the final API pod status for a pod, given the
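
allocatedResourcesMatchStatus compares only the resizable resources (CPU request, CPU limit, memory limit), only for running containers, and only when they are explicitly configured. A test-style sketch of the intended semantics; the fixture values are illustrative and assume the usual kubecontainer and apimachinery resource imports:

	cpu50m := resource.MustParse("50m")
	allocatedPod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Name: "c1",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
		},
	}}}}
	podStatus := &kubecontainer.PodStatus{ContainerStatuses: []*kubecontainer.Status{{
		Name:      "c1",
		State:     kubecontainer.ContainerStateRunning,
		Resources: &kubecontainer.ContainerResources{CPURequest: &cpu50m},
	}}}
	// The runtime still reports 50m while the allocation says 100m, so the
	// resize is not complete and the InProgress status must not be cleared.
	match := allocatedResourcesMatchStatus(allocatedPod, podStatus) // false
	_ = match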
@@ -1796,7 +1829,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 	}
 	s := kl.convertStatusToAPIStatus(pod, podStatus, oldPodStatus)
 	if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-		s.Resize = kl.determinePodResizeStatus(pod, s)
+		s.Resize = kl.determinePodResizeStatus(pod, podStatus, podIsTerminal)
 	}
 	// calculate the next phase and preserve reason
 	allStatus := append(append([]v1.ContainerStatus{}, s.ContainerStatuses...), s.InitContainerStatuses...)
@ -2076,103 +2109,63 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
|
||||
}
|
||||
|
||||
convertContainerStatusResources := func(cName string, status *v1.ContainerStatus, cStatus *kubecontainer.Status, oldStatuses map[string]v1.ContainerStatus) *v1.ResourceRequirements {
|
||||
var requests, limits v1.ResourceList
|
||||
// oldStatus should always exist if container is running
|
||||
oldStatus, oldStatusFound := oldStatuses[cName]
|
||||
// Initialize limits/requests from container's spec upon transition to Running state
|
||||
// For cpu & memory, values queried from runtime via CRI always supercedes spec values
|
||||
// For ephemeral-storage, a running container's status.limit/request equals spec.limit/request
|
||||
determineResource := func(rName v1.ResourceName, v1ContainerResource, oldStatusResource, resource v1.ResourceList) {
|
||||
if oldStatusFound {
|
||||
if oldStatus.State.Running == nil || status.ContainerID != oldStatus.ContainerID {
|
||||
if r, exists := v1ContainerResource[rName]; exists {
|
||||
resource[rName] = r.DeepCopy()
|
||||
}
|
||||
} else {
|
||||
if oldStatusResource != nil {
|
||||
if r, exists := oldStatusResource[rName]; exists {
|
||||
resource[rName] = r.DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// If the new status is missing resources, then if the container is running and previous
|
||||
// status was also running, preserve the resources previously reported.
|
||||
preserveOldResourcesValue := func(rName v1.ResourceName, oldStatusResource, resource v1.ResourceList) {
|
||||
if cStatus.State == kubecontainer.ContainerStateRunning &&
|
||||
oldStatusFound && oldStatus.State.Running != nil &&
|
||||
status.ContainerID == oldStatus.ContainerID &&
|
||||
oldStatusResource != nil {
|
||||
if r, exists := oldStatusResource[rName]; exists {
|
||||
resource[rName] = r.DeepCopy()
|
||||
}
|
||||
}
|
||||
}
|
||||
container := kubecontainer.GetContainerSpec(pod, cName)
|
||||
|
||||
// Always set the status to the latest allocated resources, even if it differs from the
|
||||
// allocation used by the current sync loop.
|
||||
alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName)
|
||||
if found {
|
||||
status.AllocatedResources = alloc.Requests
|
||||
} else if !(container.Resources.Requests == nil && container.Resources.Limits == nil) {
|
||||
// This case is expected for ephemeral containers.
|
||||
if oldStatusFound {
|
||||
status.AllocatedResources = oldStatus.AllocatedResources
|
||||
}
|
||||
if !found {
|
||||
// This case is expected for non-resizable containers (ephemeral & non-restartable init containers).
|
||||
// Don't set status.Resources in this case.
|
||||
return nil
|
||||
}
|
||||
if cStatus.State != kubecontainer.ContainerStateRunning {
|
||||
// If the container isn't running, just use the allocated resources.
|
||||
return &alloc
|
||||
}
|
||||
if oldStatus.Resources == nil {
|
||||
oldStatus.Resources = &v1.ResourceRequirements{}
|
||||
}
|
||||
|
||||
convertCustomResources := func(inResources, outResources v1.ResourceList) {
|
||||
for resourceName, resourceQuantity := range inResources {
|
||||
if resourceName == v1.ResourceCPU || resourceName == v1.ResourceMemory ||
|
||||
resourceName == v1.ResourceStorage || resourceName == v1.ResourceEphemeralStorage {
|
||||
continue
|
||||
}
|
||||
|
||||
outResources[resourceName] = resourceQuantity.DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// Convert Limits
|
||||
if alloc.Limits != nil {
|
||||
limits = make(v1.ResourceList)
|
||||
// Status resources default to the allocated resources.
|
||||
// For non-running containers this will be the reported values.
|
||||
// For non-resizable resources, these values will also be used.
|
||||
resources := alloc
|
||||
if resources.Limits != nil {
|
||||
if cStatus.Resources != nil && cStatus.Resources.CPULimit != nil {
|
||||
limits[v1.ResourceCPU] = cStatus.Resources.CPULimit.DeepCopy()
|
||||
resources.Limits[v1.ResourceCPU] = cStatus.Resources.CPULimit.DeepCopy()
|
||||
} else {
|
||||
determineResource(v1.ResourceCPU, alloc.Limits, oldStatus.Resources.Limits, limits)
|
||||
preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Limits, resources.Limits)
|
||||
}
|
||||
if cStatus.Resources != nil && cStatus.Resources.MemoryLimit != nil {
|
||||
limits[v1.ResourceMemory] = cStatus.Resources.MemoryLimit.DeepCopy()
|
||||
resources.Limits[v1.ResourceMemory] = cStatus.Resources.MemoryLimit.DeepCopy()
|
||||
} else {
|
||||
determineResource(v1.ResourceMemory, alloc.Limits, oldStatus.Resources.Limits, limits)
|
||||
preserveOldResourcesValue(v1.ResourceMemory, oldStatus.Resources.Limits, resources.Limits)
|
||||
}
|
||||
if ephemeralStorage, found := alloc.Limits[v1.ResourceEphemeralStorage]; found {
|
||||
limits[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
|
||||
}
|
||||
if storage, found := alloc.Limits[v1.ResourceStorage]; found {
|
||||
limits[v1.ResourceStorage] = storage.DeepCopy()
|
||||
}
|
||||
|
||||
convertCustomResources(alloc.Limits, limits)
|
||||
}
|
||||
// Convert Requests
|
||||
if alloc.Requests != nil {
|
||||
requests = make(v1.ResourceList)
|
||||
if resources.Requests != nil {
|
||||
if cStatus.Resources != nil && cStatus.Resources.CPURequest != nil {
|
||||
requests[v1.ResourceCPU] = cStatus.Resources.CPURequest.DeepCopy()
|
||||
resources.Requests[v1.ResourceCPU] = cStatus.Resources.CPURequest.DeepCopy()
|
||||
} else {
|
||||
determineResource(v1.ResourceCPU, alloc.Requests, oldStatus.Resources.Requests, requests)
|
||||
preserveOldResourcesValue(v1.ResourceCPU, oldStatus.Resources.Requests, resources.Requests)
|
||||
}
|
||||
if memory, found := alloc.Requests[v1.ResourceMemory]; found {
|
||||
requests[v1.ResourceMemory] = memory.DeepCopy()
|
||||
}
|
||||
if ephemeralStorage, found := alloc.Requests[v1.ResourceEphemeralStorage]; found {
|
||||
requests[v1.ResourceEphemeralStorage] = ephemeralStorage.DeepCopy()
|
||||
}
|
||||
if storage, found := alloc.Requests[v1.ResourceStorage]; found {
|
||||
requests[v1.ResourceStorage] = storage.DeepCopy()
|
||||
}
|
||||
|
||||
convertCustomResources(alloc.Requests, requests)
|
||||
}
|
||||
|
||||
resources := &v1.ResourceRequirements{
|
||||
Limits: limits,
|
||||
Requests: requests,
|
||||
}
|
||||
return resources
|
||||
return &resources
|
||||
}
|
||||
|
||||
convertContainerStatusUser := func(cStatus *kubecontainer.Status) *v1.ContainerUser {
|
||||
@@ -2341,10 +2334,15 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			}
 			status := convertContainerStatus(cStatus, oldStatusPtr)
 			if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
-				if status.State.Running != nil {
-					status.Resources = convertContainerStatusResources(cName, status, cStatus, oldStatuses)
-				}
+				status.Resources = convertContainerStatusResources(cName, status, cStatus, oldStatuses)
+
+				if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+					if alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName); found {
+						status.AllocatedResources = alloc.Requests
+					}
+				}
 			}
 
 			if utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) {
 				status.User = convertContainerStatusUser(cStatus)
 			}
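
Net effect on the reported API status: status.resources is now populated for any container with a tracked allocation whenever InPlacePodVerticalScaling is enabled (the State.Running check is gone), while status.allocatedResources is only written when the new AllocatedStatus gate is also on. An illustrative sketch of a resulting container status with both gates enabled (values are made up, not from the diff):

	status := v1.ContainerStatus{
		Name: "c1",
		// Written only behind InPlacePodVerticalScalingAllocatedStatus: the
		// requests most recently allocated by the kubelet.
		AllocatedResources: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
		// Written whenever InPlacePodVerticalScaling is on: runtime-reported
		// values for running containers, falling back to the allocation.
		Resources: &v1.ResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")},
		},
	}
	_ = status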
@@ -4543,6 +4543,7 @@ func TestConvertToAPIContainerStatusesDataRace(t *testing.T) {
 
 func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
 
 	nowTime := time.Now()
 	testContainerName := "ctr0"
 	testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
@ -4566,26 +4567,39 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
|
||||
ContainerStatuses: []v1.ContainerStatus{testContainerStatus},
|
||||
},
|
||||
}
|
||||
testKubeContainerStatus := kubecontainer.Status{
|
||||
Name: testContainerName,
|
||||
ID: testContainerID,
|
||||
Image: "img",
|
||||
ImageID: "1234",
|
||||
ImageRef: "img1234",
|
||||
State: kubecontainer.ContainerStateRunning,
|
||||
StartedAt: nowTime,
|
||||
}
|
||||
testPodStatus := &kubecontainer.PodStatus{
|
||||
ID: testPod.UID,
|
||||
Name: testPod.Name,
|
||||
Namespace: testPod.Namespace,
|
||||
ContainerStatuses: []*kubecontainer.Status{&testKubeContainerStatus},
|
||||
|
||||
testPodStatus := func(state kubecontainer.State, resources *kubecontainer.ContainerResources) *kubecontainer.PodStatus {
|
||||
cStatus := kubecontainer.Status{
|
||||
Name: testContainerName,
|
||||
ID: testContainerID,
|
||||
Image: "img",
|
||||
ImageID: "1234",
|
||||
ImageRef: "img1234",
|
||||
State: state,
|
||||
Resources: resources,
|
||||
}
|
||||
switch state {
|
||||
case kubecontainer.ContainerStateRunning:
|
||||
cStatus.StartedAt = nowTime
|
||||
case kubecontainer.ContainerStateExited:
|
||||
cStatus.StartedAt = nowTime
|
||||
cStatus.FinishedAt = nowTime
|
||||
}
|
||||
return &kubecontainer.PodStatus{
|
||||
ID: testPod.UID,
|
||||
Name: testPod.Name,
|
||||
Namespace: testPod.Namespace,
|
||||
ContainerStatuses: []*kubecontainer.Status{&cStatus},
|
||||
}
|
||||
}
|
||||
|
||||
CPU1AndMem1G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("1"), v1.ResourceMemory: resource.MustParse("1Gi")}
|
||||
CPU2AndMem2G := v1.ResourceList{v1.ResourceCPU: resource.MustParse("2"), v1.ResourceMemory: resource.MustParse("2Gi")}
|
||||
CPU1AndMem1GAndStorage2G := CPU1AndMem1G.DeepCopy()
|
||||
CPU1AndMem1GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
|
||||
CPU1AndMem1GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
|
||||
CPU1AndMem2GAndStorage2G := CPU1AndMem1GAndStorage2G.DeepCopy()
|
||||
CPU1AndMem2GAndStorage2G[v1.ResourceMemory] = resource.MustParse("2Gi")
|
||||
CPU2AndMem2GAndStorage2G := CPU2AndMem2G.DeepCopy()
|
||||
CPU2AndMem2GAndStorage2G[v1.ResourceEphemeralStorage] = resource.MustParse("2Gi")
|
||||
CPU2AndMem2GAndStorage2G[v1.ResourceStorage] = resource.MustParse("2Gi")
|
||||
@ -4611,258 +4625,314 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
|
||||
|
||||
idx := 0
|
||||
for tdesc, tc := range map[string]struct {
|
||||
Resources []v1.ResourceRequirements
|
||||
OldStatus []v1.ContainerStatus
|
||||
Expected []v1.ContainerStatus
|
||||
State kubecontainer.State // Defaults to Running
|
||||
Resources v1.ResourceRequirements
|
||||
AllocatedResources *v1.ResourceRequirements // Defaults to Resources
|
||||
OldStatus v1.ContainerStatus
|
||||
Expected v1.ContainerStatus
|
||||
}{
|
||||
"GuaranteedQoSPod with CPU and memory CRI status": {
|
||||
Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
|
||||
OldStatus: []v1.ContainerStatus{
|
||||
{
|
||||
Name: testContainerName,
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
OldStatus: v1.ContainerStatus{
|
||||
Name: testContainerName,
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
Expected: []v1.ContainerStatus{
|
||||
{
|
||||
Name: testContainerName,
|
||||
ContainerID: testContainerID.String(),
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
|
||||
AllocatedResources: CPU1AndMem1G,
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
Expected: v1.ContainerStatus{
|
||||
Name: testContainerName,
|
||||
ContainerID: testContainerID.String(),
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
|
||||
AllocatedResources: CPU1AndMem1G,
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
},
|
||||
"BurstableQoSPod with CPU and memory CRI status": {
|
||||
Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G}},
|
||||
OldStatus: []v1.ContainerStatus{
|
||||
{
|
||||
Name: testContainerName,
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
OldStatus: v1.ContainerStatus{
|
||||
Name: testContainerName,
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
Expected: []v1.ContainerStatus{
|
||||
{
|
||||
Name: testContainerName,
|
||||
ContainerID: testContainerID.String(),
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
|
||||
AllocatedResources: CPU1AndMem1G,
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
Expected: v1.ContainerStatus{
|
||||
Name: testContainerName,
|
||||
ContainerID: testContainerID.String(),
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
|
||||
AllocatedResources: CPU1AndMem1G,
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
},
|
||||
"GuaranteedQoSPod with CPU and memory CRI status, with ephemeral storage": {
|
||||
Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
|
||||
OldStatus: []v1.ContainerStatus{
|
||||
{
|
||||
Name: testContainerName,
|
||||
Image: "img",
|
||||
ImageID: "img1234",
|
||||
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
|
||||
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
|
||||
},
|
||||
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
|
||||
OldStatus: v1.ContainerStatus{
|
||||
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1G, Requests: CPU1AndMem1G},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage": {
Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
OldStatus: []v1.ContainerStatus{
{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
},
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"BurstableQoSPod with CPU and memory CRI status, with ephemeral storage, nil resources in OldStatus": {
Resources: []v1.ResourceRequirements{{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G}},
OldStatus: []v1.ContainerStatus{
{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
},
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"BestEffortQoSPod": {
OldStatus: []v1.ContainerStatus{
{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
Resources: &v1.ResourceRequirements{},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
Resources: &v1.ResourceRequirements{},
},
},
"BestEffort QoSPod with extended resources": {
Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(v1.ResourceList{})}},
OldStatus: []v1.ContainerStatus{
{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Resources: v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(v1.ResourceList{}),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(v1.ResourceList{}),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(v1.ResourceList{})},
},
},
"BurstableQoSPod with extended resources": {
Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1G)}},
OldStatus: []v1.ContainerStatus{
{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G)},
},
},
"BurstableQoSPod with storage, ephemeral storage and extended resources": {
Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)}},
OldStatus: []v1.ContainerStatus{
{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1GAndStorage2G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1GAndStorage2G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1GAndStorage2G)},
},
},
"GuaranteedQoSPod with extended resources": {
Resources: []v1.ResourceRequirements{{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)}},
OldStatus: []v1.ContainerStatus{
{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Resources: v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{},
},
Expected: []v1.ContainerStatus{
{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: addExtendedResource(CPU1AndMem1G),
Resources: &v1.ResourceRequirements{Requests: addExtendedResource(CPU1AndMem1G), Limits: addExtendedResource(CPU1AndMem1G)},
},
},
"newly created Pod": {
State: kubecontainer.ContainerStateCreated,
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"newly running Pod": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Waiting: &v1.ContainerStateWaiting{}},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU1AndMem1GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
},
"newly terminated Pod": {
State: kubecontainer.ContainerStateExited,
// Actual resources were different, but they should be ignored once the container is terminated.
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{
ContainerID: testContainerID.String(),
StartedAt: metav1.NewTime(nowTime),
FinishedAt: metav1.NewTime(nowTime),
}},
AllocatedResources: CPU2AndMem2GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
},
},
"resizing Pod": {
Resources: v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
AllocatedResources: &v1.ResourceRequirements{Limits: CPU2AndMem2GAndStorage2G, Requests: CPU2AndMem2GAndStorage2G},
OldStatus: v1.ContainerStatus{
Name: testContainerName,
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{}},
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem1GAndStorage2G},
},
Expected: v1.ContainerStatus{
Name: testContainerName,
ContainerID: testContainerID.String(),
Image: "img",
ImageID: "img1234",
State: v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: metav1.NewTime(nowTime)}},
AllocatedResources: CPU2AndMem2GAndStorage2G,
Resources: &v1.ResourceRequirements{Limits: CPU1AndMem1GAndStorage2G, Requests: CPU1AndMem2GAndStorage2G},
},
},
} {
tPod := testPod.DeepCopy()
tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
for i := range tPod.Spec.Containers {
if tc.Resources != nil {
tPod.Spec.Containers[i].Resources = tc.Resources[i]
t.Run(tdesc, func(t *testing.T) {
tPod := testPod.DeepCopy()
tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)

if tc.AllocatedResources != nil {
tPod.Spec.Containers[0].Resources = *tc.AllocatedResources
} else {
tPod.Spec.Containers[0].Resources = tc.Resources
}
kubelet.statusManager.SetPodAllocation(tPod)
if tc.Resources != nil {
tPod.Status.ContainerStatuses[i].AllocatedResources = tc.Resources[i].Requests
testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
MemoryLimit: tc.Resources[i].Limits.Memory(),
CPULimit: tc.Resources[i].Limits.Cpu(),
CPURequest: tc.Resources[i].Requests.Cpu(),
}
resources := &kubecontainer.ContainerResources{
MemoryLimit: tc.Resources.Limits.Memory(),
CPULimit: tc.Resources.Limits.Cpu(),
CPURequest: tc.Resources.Requests.Cpu(),
}
}
state := kubecontainer.ContainerStateRunning
if tc.State != "" {
state = tc.State
}
podStatus := testPodStatus(state, resources)

t.Logf("TestCase: %q", tdesc)
cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
assert.Equal(t, tc.Expected, cStatuses)
for _, enableAllocatedStatus := range []bool{true, false} {
t.Run(fmt.Sprintf("AllocatedStatus=%t", enableAllocatedStatus), func(t *testing.T) {
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, enableAllocatedStatus)

expected := tc.Expected
if !enableAllocatedStatus {
expected = *expected.DeepCopy()
expected.AllocatedResources = nil
}

cStatuses := kubelet.convertToAPIContainerStatuses(tPod, podStatus, []v1.ContainerStatus{tc.OldStatus}, tPod.Spec.Containers, false, false)
assert.Equal(t, expected, cStatuses[0])
})
}
})
}
}

@ -6422,3 +6492,155 @@ func TestResolveRecursiveReadOnly(t *testing.T) {
}
}
}

func TestAllocatedResourcesMatchStatus(t *testing.T) {
tests := []struct {
name string
allocatedResources v1.ResourceRequirements
statusResources *kubecontainer.ContainerResources
statusTerminated bool
expectMatch bool
}{{
name: "guaranteed pod: match",
allocatedResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
},
statusResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(100, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(100, resource.DecimalSI),
MemoryLimit: resource.NewScaledQuantity(100, 6),
},
expectMatch: true,
}, {
name: "guaranteed pod: cpu request mismatch",
allocatedResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
},
statusResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(50, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(100, resource.DecimalSI),
MemoryLimit: resource.NewScaledQuantity(100, 6),
},
expectMatch: false,
}, {
name: "guaranteed pod: cpu limit mismatch",
allocatedResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
},
statusResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(100, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(50, resource.DecimalSI),
MemoryLimit: resource.NewScaledQuantity(100, 6),
},
expectMatch: false,
}, {
name: "guaranteed pod: memory limit mismatch",
allocatedResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
},
statusResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(100, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(100, resource.DecimalSI),
MemoryLimit: resource.NewScaledQuantity(50, 6),
},
expectMatch: false,
}, {
name: "guaranteed pod: terminated mismatch",
allocatedResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("100m"),
v1.ResourceMemory: resource.MustParse("100M"),
},
},
statusResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(100, resource.DecimalSI),
CPULimit: resource.NewMilliQuantity(100, resource.DecimalSI),
MemoryLimit: resource.NewScaledQuantity(50, 6),
},
statusTerminated: true,
expectMatch: true,
}, {
name: "burstable: no cpu request",
allocatedResources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("100M"),
},
},
statusResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
},
expectMatch: true,
}, {
name: "best effort",
allocatedResources: v1.ResourceRequirements{},
statusResources: &kubecontainer.ContainerResources{
CPURequest: resource.NewMilliQuantity(2, resource.DecimalSI),
},
expectMatch: true,
}}

for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
allocatedPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "c",
Resources: test.allocatedResources,
}},
},
}
state := kubecontainer.ContainerStateRunning
if test.statusTerminated {
state = kubecontainer.ContainerStateExited
}
podStatus := &kubecontainer.PodStatus{
Name: "test",
ContainerStatuses: []*kubecontainer.Status{
{
Name: "c",
State: state,
Resources: test.statusResources,
},
},
}

match := allocatedResourcesMatchStatus(&allocatedPod, podStatus)
assert.Equal(t, test.expectMatch, match)
})
}
}

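// A minimal illustration (not taken from the commit itself) of the quantity semantics the "match"
// cases above rely on: resource.MustParse("100M") and resource.NewScaledQuantity(100, 6) both denote
// 100 * 10^6 bytes, so comparing by value treats them as equal even though they are built differently.
//
//	package main
//
//	import (
//		"fmt"
//
//		"k8s.io/apimachinery/pkg/api/resource"
//	)
//
//	func main() {
//		parsed := resource.MustParse("100M")         // decimal suffix: 100 megabytes
//		scaled := resource.NewScaledQuantity(100, 6) // 100 * 10^6, the same value
//		fmt.Println(parsed.Cmp(*scaled) == 0)        // true: Cmp compares values, not string forms
//	}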
@ -365,7 +365,7 @@ func PodUsageFunc(obj runtime.Object, clock clock.Clock) (corev1.ResourceList, e
}

opts := resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
requests := resourcehelper.PodRequests(pod, opts)
limits := resourcehelper.PodLimits(pod, opts)

@ -906,7 +906,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
usageFgEnabled corev1.ResourceList
usageFgDisabled corev1.ResourceList
}{
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for memory resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for memory resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
@ -925,8 +925,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
AllocatedResources: api.ResourceList{
api.ResourceMemory: resource.MustParse("150Mi"),
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceMemory: resource.MustParse("150Mi"),
},
},
},
},
@ -947,7 +949,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
@ -966,8 +968,10 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
AllocatedResources: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
},
},
},
},
@ -988,7 +992,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources) for CPU and memory resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources) for CPU and memory resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
@ -1009,9 +1013,11 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
Status: api.PodStatus{
ContainerStatuses: []api.ContainerStatus{
{
AllocatedResources: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
api.ResourceMemory: resource.MustParse("250Mi"),
Resources: &api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse("150m"),
api.ResourceMemory: resource.MustParse("250Mi"),
},
},
},
},
@ -1038,7 +1044,7 @@ func TestPodEvaluatorUsageResourceResize(t *testing.T) {
generic.ObjectCountQuotaResourceNameFor(schema.GroupResource{Resource: "pods"}): resource.MustParse("1"),
},
},
"verify Max(Container.Spec.Requests, ContainerStatus.AllocatedResources==nil) for CPU and memory resource": {
"verify Max(Container.Spec.Requests, ContainerStatus.Resources==nil) for CPU and memory resource": {
pod: &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{

@ -91,7 +91,7 @@ type podChangeExtractor func(newPod *v1.Pod, oldPod *v1.Pod) ActionType
// extractPodScaleDown interprets the update of a pod and returns PodRequestScaledDown event if any pod's resource request(s) is scaled down.
func extractPodScaleDown(newPod, oldPod *v1.Pod) ActionType {
opt := resource.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
newPodRequests := resource.PodRequests(newPod, opt)
oldPodRequests := resource.PodRequests(oldPod, opt)

@ -330,11 +330,11 @@ func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod

// the other pod was scheduled, so modification or deletion may free up some resources.
originalMaxResourceReq, modifiedMaxResourceReq := &framework.Resource{}, &framework.Resource{}
originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling}))
modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling}))
originalMaxResourceReq.SetMaxResource(resource.PodRequests(originalPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))
modifiedMaxResourceReq.SetMaxResource(resource.PodRequests(modifiedPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling}))

// check whether the resource request of the modified pod is less than the original pod.
podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{InPlacePodVerticalScalingEnabled: f.enableInPlacePodVerticalScaling})
podRequests := resource.PodRequests(targetPod, resource.PodResourcesOptions{UseStatusResources: f.enableInPlacePodVerticalScaling})
for rName, rValue := range podRequests {
if rValue.IsZero() {
// We only care about the resources requested by the pod we are trying to schedule.

@ -118,7 +118,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(logger kl
func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, resourceName v1.ResourceName) int64 {

opts := resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
if !r.useRequested {
opts.NonMissingContainerRequests = v1.ResourceList{

@ -1054,11 +1054,11 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {

func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
})

non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
UseStatusResources: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{
v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),

@ -1533,9 +1533,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
restartAlways := v1.ContainerRestartPolicyAlways

preparePod := func(pod v1.Pod,
requests, allocatedResources,
initRequests, initAllocatedResources,
sidecarRequests, sidecarAllocatedResources *v1.ResourceList,
requests, statusResources,
initRequests, initStatusResources,
sidecarRequests, sidecarStatusResources *v1.ResourceList,
resizeStatus v1.PodResizeStatus) v1.Pod {

if requests != nil {
@ -1545,11 +1545,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
Resources: v1.ResourceRequirements{Requests: *requests},
})
}
if allocatedResources != nil {
if statusResources != nil {
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses,
v1.ContainerStatus{
Name: "c1",
AllocatedResources: *allocatedResources,
Name: "c1",
Resources: &v1.ResourceRequirements{
Requests: *statusResources,
},
})
}

@ -1561,11 +1563,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
},
)
}
if initAllocatedResources != nil {
if initStatusResources != nil {
pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
v1.ContainerStatus{
Name: "i1",
AllocatedResources: *initAllocatedResources,
Name: "i1",
Resources: &v1.ResourceRequirements{
Requests: *initStatusResources,
},
})
}

@ -1578,11 +1582,13 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
},
)
}
if sidecarAllocatedResources != nil {
if sidecarStatusResources != nil {
pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
v1.ContainerStatus{
Name: "s1",
AllocatedResources: *sidecarAllocatedResources,
Name: "s1",
Resources: &v1.ResourceRequirements{
Requests: *sidecarStatusResources,
},
})
}

@ -1591,74 +1597,74 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
}

tests := []struct {
name string
requests v1.ResourceList
allocatedResources v1.ResourceList
initRequests *v1.ResourceList
initAllocatedResources *v1.ResourceList
sidecarRequests *v1.ResourceList
sidecarAllocatedResources *v1.ResourceList
resizeStatus v1.PodResizeStatus
expectedResource Resource
expectedNon0CPU int64
expectedNon0Mem int64
name string
requests v1.ResourceList
statusResources v1.ResourceList
initRequests *v1.ResourceList
initStatusResources *v1.ResourceList
sidecarRequests *v1.ResourceList
sidecarStatusResources *v1.ResourceList
resizeStatus v1.PodResizeStatus
expectedResource Resource
expectedNon0CPU int64
expectedNon0Mem int64
}{
{
name: "Pod with no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
name: "Pod with no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "Pod with resize in progress",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInProgress,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
name: "Pod with resize in progress",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInProgress,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "Pod with deferred resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusDeferred,
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
name: "Pod with deferred resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusDeferred,
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
},
{
name: "Pod with infeasible resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInfeasible,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
name: "Pod with infeasible resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
resizeStatus: v1.PodResizeStatusInfeasible,
expectedResource: Resource{MilliCPU: cpu500m.MilliValue(), Memory: mem500M.Value()},
expectedNon0CPU: cpu500m.MilliValue(),
expectedNon0Mem: mem500M.Value(),
},
{
name: "Pod with init container and no pending resize",
name: "Pod with init container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
},
{
name: "Pod with sider container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
statusResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarStatusResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
expectedNon0CPU: cpu700m.MilliValue(),
expectedNon0Mem: mem800M.Value(),
},
{
name: "Pod with sider container and no pending resize",
requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
resizeStatus: "",
expectedResource: Resource{
MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
Memory: mem500M.Value() + mem800M.Value(),
@ -1671,9 +1677,9 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
pod := preparePod(*testpod.DeepCopy(),
&tt.requests, &tt.allocatedResources,
tt.initRequests, tt.initAllocatedResources,
tt.sidecarRequests, tt.sidecarAllocatedResources,
&tt.requests, &tt.statusResources,
tt.initRequests, tt.initStatusResources,
tt.sidecarRequests, tt.sidecarStatusResources,
tt.resizeStatus)

res, non0CPU, non0Mem := calculateResource(&pod)

@ -34,14 +34,12 @@ import (
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/admission"
genericadmissioninitailizer "k8s.io/apiserver/pkg/admission/initializer"
"k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/utils/lru"

api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/features"
)

const (
@ -523,11 +521,8 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {

// enforce pod limits on init containers
if limitType == corev1.LimitTypePod {
opts := podResourcesOptions{
InPlacePodVerticalScalingEnabled: feature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
}
podRequests := podRequests(pod, opts)
podLimits := podLimits(pod, opts)
podRequests := podRequests(pod)
podLimits := podLimits(pod)
for k, v := range limit.Min {
if err := minConstraint(string(limitType), string(k), v, podRequests, podLimits); err != nil {
errs = append(errs, err)
@ -548,39 +543,17 @@ func PodValidateLimitFunc(limitRange *corev1.LimitRange, pod *api.Pod) error {
return utilerrors.NewAggregate(errs)
}

type podResourcesOptions struct {
// InPlacePodVerticalScalingEnabled indicates that the in-place pod vertical scaling feature gate is enabled.
InPlacePodVerticalScalingEnabled bool
}

// podRequests is a simplified version of pkg/api/v1/resource/PodRequests that operates against the core version of
// pod. Any changes to that calculation should be reflected here.
// NOTE: We do not want to check status resources here, only the spec. This is equivalent to setting
// UseStatusResources=false in the common helper.
// TODO: Maybe we can consider doing a partial conversion of the pod to a v1
// type and then using the pkg/api/v1/resource/PodRequests.
func podRequests(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
func podRequests(pod *api.Pod) api.ResourceList {
reqs := api.ResourceList{}

var containerStatuses map[string]*api.ContainerStatus
if opts.InPlacePodVerticalScalingEnabled {
containerStatuses = map[string]*api.ContainerStatus{}
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
}

for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
if opts.InPlacePodVerticalScalingEnabled {
cs, found := containerStatuses[container.Name]
if found {
if pod.Status.Resize == api.PodResizeStatusInfeasible {
containerReqs = cs.AllocatedResources
} else {
containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
}
}
}

addResourceList(reqs, containerReqs)
}

@ -614,9 +587,11 @@ func podRequests(pod *api.Pod, opts podResourcesOptions) api.ResourceList {

// podLimits is a simplified version of pkg/api/v1/resource/PodLimits that operates against the core version of
// pod. Any changes to that calculation should be reflected here.
// NOTE: We do not want to check status resources here, only the spec. This is equivalent to setting
// UseStatusResources=false in the common helper.
// TODO: Maybe we can consider doing a partial conversion of the pod to a v1
// type and then using the pkg/api/v1/resource/PodLimits.
func podLimits(pod *api.Pod, opts podResourcesOptions) api.ResourceList {
func podLimits(pod *api.Pod) api.ResourceList {
limits := api.ResourceList{}

for _, container := range pod.Spec.Containers {
@ -669,24 +644,3 @@ func maxResourceList(list, newList api.ResourceList) {
}
}
}

// max returns the result of max(a, b) for each named resource and is only used if we can't
// accumulate into an existing resource list
func max(a api.ResourceList, b api.ResourceList) api.ResourceList {
result := api.ResourceList{}
for key, value := range a {
if other, found := b[key]; found {
if value.Cmp(other) <= 0 {
result[key] = other.DeepCopy()
continue
}
}
result[key] = value.DeepCopy()
}
for key, value := range b {
if _, found := result[key]; !found {
result[key] = value.DeepCopy()
}
}
return result
}

@ -35,8 +35,10 @@ type PodResourcesOptions struct {
// Reuse, if provided will be reused to accumulate resources and returned by the PodRequests or PodLimits
// functions. All existing values in Reuse will be lost.
Reuse v1.ResourceList
// InPlacePodVerticalScalingEnabled indicates that the in-place pod vertical scaling feature gate is enabled.
InPlacePodVerticalScalingEnabled bool
// UseStatusResources indicates whether resources reported by the PodStatus should be considered
// when evaluating the pod resources. This MUST be false if the InPlacePodVerticalScaling
// feature is not enabled.
UseStatusResources bool
// ExcludeOverhead controls if pod overhead is excluded from the calculation.
ExcludeOverhead bool
// ContainerFn is called with the effective resources required for each container within the pod.
@ -54,7 +56,7 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
reqs := reuseOrClearResourceList(opts.Reuse)

var containerStatuses map[string]*v1.ContainerStatus
if opts.InPlacePodVerticalScalingEnabled {
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
@ -63,13 +65,13 @@ func PodRequests(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {

for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
if opts.InPlacePodVerticalScalingEnabled {
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found {
if found && cs.Resources != nil {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
containerReqs = cs.AllocatedResources.DeepCopy()
containerReqs = cs.Resources.Requests.DeepCopy()
} else {
containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
}
}
}
@ -155,11 +157,31 @@ func PodLimits(pod *v1.Pod, opts PodResourcesOptions) v1.ResourceList {
// attempt to reuse the maps if passed, or allocate otherwise
limits := reuseOrClearResourceList(opts.Reuse)

for _, container := range pod.Spec.Containers {
if opts.ContainerFn != nil {
opts.ContainerFn(container.Resources.Limits, Containers)
var containerStatuses map[string]*v1.ContainerStatus
if opts.UseStatusResources {
containerStatuses = make(map[string]*v1.ContainerStatus, len(pod.Status.ContainerStatuses))
for i := range pod.Status.ContainerStatuses {
containerStatuses[pod.Status.ContainerStatuses[i].Name] = &pod.Status.ContainerStatuses[i]
}
addResourceList(limits, container.Resources.Limits)
}

for _, container := range pod.Spec.Containers {
containerLimits := container.Resources.Limits
if opts.UseStatusResources {
cs, found := containerStatuses[container.Name]
if found && cs.Resources != nil {
if pod.Status.Resize == v1.PodResizeStatusInfeasible {
containerLimits = cs.Resources.Limits.DeepCopy()
} else {
containerLimits = max(container.Resources.Limits, cs.Resources.Limits)
}
}
}

if opts.ContainerFn != nil {
opts.ContainerFn(containerLimits, Containers)
}
addResourceList(limits, containerLimits)
}

restartableInitContainerLimits := v1.ResourceList{}

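// A minimal usage sketch of the helper behavior changed above (not part of the commit): with
// UseStatusResources set, PodRequests takes the per-resource max of the spec request and the
// request reported in the container status, except that an Infeasible resize falls back to the
// status value. The resourcehelper import alias for the pkg/api/v1/resource package is assumed
// from the callers shown earlier in this diff.
//
//	pod := &v1.Pod{
//		Spec: v1.PodSpec{Containers: []v1.Container{{
//			Name:      "container-1",
//			Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("4")}},
//		}}},
//		Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{{
//			Name:      "container-1",
//			Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("2")}},
//		}}},
//	}
//
//	reqs := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{UseStatusResources: true})
//	// reqs[v1.ResourceCPU] == 4: max(spec, status) while the resize is still pending.
//
//	pod.Status.Resize = v1.PodResizeStatusInfeasible
//	reqs = resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{UseStatusResources: true})
//	// reqs[v1.ResourceCPU] == 2: an infeasible resize uses the status (actually allocated) value.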
@ -432,7 +432,7 @@ func TestPodResourceRequests(t *testing.T) {
v1.ResourceCPU: resource.MustParse("2"),
},
podResizeStatus: v1.PodResizeStatusInfeasible,
options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: true},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
@ -446,8 +446,10 @@ func TestPodResourceRequests(t *testing.T) {
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
AllocatedResources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
@ -457,7 +459,7 @@ func TestPodResourceRequests(t *testing.T) {
expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: true},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
@ -471,19 +473,21 @@ func TestPodResourceRequests(t *testing.T) {
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
AllocatedResources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
},
{
description: "resized, infeasible, feature gate disabled",
description: "resized, infeasible, but don't use status",
expectedRequests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
podResizeStatus: v1.PodResizeStatusInfeasible,
options: PodResourcesOptions{InPlacePodVerticalScalingEnabled: false},
options: PodResourcesOptions{UseStatusResources: false},
containers: []v1.Container{
{
Name: "container-1",
@ -497,8 +501,10 @@ func TestPodResourceRequests(t *testing.T) {
containerStatus: []v1.ContainerStatus{
{
Name: "container-1",
AllocatedResources: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
Resources: &v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("2"),
},
},
},
},
@ -742,12 +748,13 @@ func TestPodResourceRequestsReuse(t *testing.T) {
func TestPodResourceLimits(t *testing.T) {
restartAlways := v1.ContainerRestartPolicyAlways
testCases := []struct {
description string
options PodResourcesOptions
overhead v1.ResourceList
initContainers []v1.Container
containers []v1.Container
expectedLimits v1.ResourceList
description string
options PodResourcesOptions
overhead v1.ResourceList
initContainers []v1.Container
containers []v1.Container
containerStatuses []v1.ContainerStatus
expectedLimits v1.ResourceList
}{
{
description: "nil options, larger init container",
@ -1119,6 +1126,87 @@ func TestPodResourceLimits(t *testing.T) {
},
},
},
{
description: "pod scaled up",
expectedLimits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
},
containerStatuses: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
},
},
{
description: "pod scaled down",
expectedLimits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
options: PodResourcesOptions{UseStatusResources: true},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
},
containerStatuses: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
},
},
{
description: "pod scaled down, don't use status",
expectedLimits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
options: PodResourcesOptions{UseStatusResources: false},
containers: []v1.Container{
{
Name: "container-1",
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("1Gi"),
},
},
},
},
containerStatuses: []v1.ContainerStatus{
{
Name: "container-1",
Resources: &v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("2Gi"),
},
},
},
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
@ -1128,6 +1216,9 @@ func TestPodResourceLimits(t *testing.T) {
InitContainers: tc.initContainers,
Overhead: tc.overhead,
},
Status: v1.PodStatus{
ContainerStatuses: tc.containerStatuses,
},
}
limits := PodLimits(p, tc.options)
if diff := cmp.Diff(limits, tc.expectedLimits); diff != "" {

@ -49,11 +49,11 @@ func podRequests(pod *corev1.Pod) corev1.ResourceList {
for _, container := range pod.Spec.Containers {
containerReqs := container.Resources.Requests
cs, found := containerStatuses[container.Name]
if found {
if found && cs.Resources != nil {
if pod.Status.Resize == corev1.PodResizeStatusInfeasible {
containerReqs = cs.AllocatedResources.DeepCopy()
containerReqs = cs.Resources.Requests.DeepCopy()
} else {
containerReqs = max(container.Resources.Requests, cs.AllocatedResources)
containerReqs = max(container.Resources.Requests, cs.Resources.Requests)
}
}
addResourceList(reqs, containerReqs)

@ -122,13 +122,16 @@ func doPodResizeTests(f *framework.Framework) {
|
||||
patchString string
|
||||
expected []e2epod.ResizableContainerInfo
|
||||
addExtendedResource bool
|
||||
// TODO(123940): test rollback for all test cases once resize is more responsive.
|
||||
testRollback bool
|
||||
}
|
||||
|
||||
noRestart := v1.NotRequired
|
||||
doRestart := v1.RestartContainer
|
||||
tests := []testCase{
|
||||
{
|
||||
name: "Guaranteed QoS pod, one container - increase CPU & memory",
|
||||
name: "Guaranteed QoS pod, one container - increase CPU & memory",
|
||||
testRollback: true,
|
||||
containers: []e2epod.ResizableContainerInfo{
|
||||
{
|
||||
Name: "c1",
|
||||
@ -208,7 +211,8 @@ func doPodResizeTests(f *framework.Framework) {
			},
		},
		{
			name: "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)",
			name: "Guaranteed QoS pod, three containers (c1, c2, c3) - increase: CPU (c1,c3), memory (c2) ; decrease: CPU (c2), memory (c1,c3)",
			testRollback: true,
			containers: []e2epod.ResizableContainerInfo{
				{
					Name: "c1",
@ -256,7 +260,8 @@ func doPodResizeTests(f *framework.Framework) {
			},
		},
		{
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory requests only",
			testRollback: true,
			containers: []e2epod.ResizableContainerInfo{
				{
					Name: "c1",
@ -274,7 +279,8 @@ func doPodResizeTests(f *framework.Framework) {
			},
		},
		{
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only",
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease memory limits only",
			testRollback: true,
			containers: []e2epod.ResizableContainerInfo{
				{
					Name: "c1",
@ -328,7 +334,8 @@ func doPodResizeTests(f *framework.Framework) {
			},
		},
		{
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU requests only",
			testRollback: true,
			containers: []e2epod.ResizableContainerInfo{
				{
					Name: "c1",
@ -346,7 +353,8 @@ func doPodResizeTests(f *framework.Framework) {
			},
		},
		{
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
			name: "Burstable QoS pod, one container with cpu & memory requests + limits - decrease CPU limits only",
			testRollback: true,
			containers: []e2epod.ResizableContainerInfo{
				{
					Name: "c1",
@ -634,7 +642,8 @@ func doPodResizeTests(f *framework.Framework) {
			},
		},
		{
			name: "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
			name: "Guaranteed QoS pod, one container - increase CPU (NotRequired) & memory (RestartContainer)",
			testRollback: true,
			containers: []e2epod.ResizableContainerInfo{
				{
					Name: "c1",
@ -657,7 +666,8 @@ func doPodResizeTests(f *framework.Framework) {
			},
		},
		{
			name: "Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)",
			name: "Burstable QoS pod, one container - decrease CPU (RestartContainer) & memory (NotRequired)",
			testRollback: true,
			containers: []e2epod.ResizableContainerInfo{
				{
					Name: "c1",
@ -850,8 +860,6 @@ func doPodResizeTests(f *framework.Framework) {
		},
	}

	timeouts := framework.NewTimeoutContext()

	for idx := range tests {
		tc := tests[idx]
		ginkgo.It(tc.name, func(ctx context.Context) {
@ -862,7 +870,8 @@ func doPodResizeTests(f *framework.Framework) {
			tStamp := strconv.Itoa(time.Now().Nanosecond())
			e2epod.InitDefaultResizePolicy(tc.containers)
			e2epod.InitDefaultResizePolicy(tc.expected)
			testPod = e2epod.MakePodWithResizableContainers(f.Namespace.Name, "testpod", tStamp, tc.containers)
			testPod = e2epod.MakePodWithResizableContainers(f.Namespace.Name, "", tStamp, tc.containers)
			testPod.GenerateName = "resize-test-"
			testPod = e2epod.MustMixinRestrictedPodSecurity(testPod)

			if tc.addExtendedResource {
@ -882,17 +891,17 @@ func doPodResizeTests(f *framework.Framework) {
			ginkgo.By("creating pod")
			newPod := podClient.CreateSync(ctx, testPod)

			ginkgo.By("verifying initial pod resources, allocations are as expected")
			ginkgo.By("verifying initial pod resources are as expected")
			e2epod.VerifyPodResources(newPod, tc.containers)
			ginkgo.By("verifying initial pod resize policy is as expected")
			e2epod.VerifyPodResizePolicy(newPod, tc.containers)

			ginkgo.By("verifying initial pod status resources are as expected")
			e2epod.VerifyPodStatusResources(newPod, tc.containers)
			framework.ExpectNoError(e2epod.VerifyPodStatusResources(newPod, tc.containers))
			ginkgo.By("verifying initial cgroup config are as expected")
			framework.ExpectNoError(e2epod.VerifyPodContainersCgroupValues(ctx, f, newPod, tc.containers))

			patchAndVerify := func(patchString string, expectedContainers []e2epod.ResizableContainerInfo, initialContainers []e2epod.ResizableContainerInfo, opStr string, isRollback bool) {
			patchAndVerify := func(patchString string, expectedContainers []e2epod.ResizableContainerInfo, opStr string) {
				ginkgo.By(fmt.Sprintf("patching pod for %s", opStr))
				patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(context.TODO(), newPod.Name,
					types.StrategicMergePatchType, []byte(patchString), metav1.PatchOptions{})
@ -900,35 +909,32 @@ func doPodResizeTests(f *framework.Framework) {

				ginkgo.By(fmt.Sprintf("verifying pod patched for %s", opStr))
				e2epod.VerifyPodResources(patchedPod, expectedContainers)
				gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
					WithArguments(patchedPod, initialContainers).
					Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")

				ginkgo.By(fmt.Sprintf("waiting for %s to be actuated", opStr))
				resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod, patchedPod, expectedContainers, initialContainers, isRollback)

				// Check cgroup values only for containerd versions before 1.6.9
				ginkgo.By(fmt.Sprintf("verifying pod container's cgroup values after %s", opStr))
				framework.ExpectNoError(e2epod.VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers))

				ginkgo.By(fmt.Sprintf("verifying pod resources after %s", opStr))
				e2epod.VerifyPodResources(resizedPod, expectedContainers)

				ginkgo.By(fmt.Sprintf("verifying pod allocations after %s", opStr))
				gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
					WithArguments(resizedPod, expectedContainers).
					Should(gomega.BeNil(), "failed to verify Pod allocations for resizedPod")
				resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod)
				e2epod.ExpectPodResized(ctx, f, resizedPod, expectedContainers)
			}

			patchAndVerify(tc.patchString, tc.expected, tc.containers, "resize", false)
			patchAndVerify(tc.patchString, tc.expected, "resize")

			rbPatchStr, err := e2epod.ResizeContainerPatch(tc.containers)
			framework.ExpectNoError(err)
			// Resize has been actuated, test rollback
			patchAndVerify(rbPatchStr, tc.containers, tc.expected, "rollback", true)
			if tc.testRollback {
				// Resize has been actuated, test rollback
				rollbackContainers := make([]e2epod.ResizableContainerInfo, len(tc.containers))
				copy(rollbackContainers, tc.containers)
				for i, c := range rollbackContainers {
					gomega.Expect(c.Name).To(gomega.Equal(tc.expected[i].Name),
						"test case containers & expectations should be in the same order")
					// Resizes that trigger a restart should trigger a second restart when rolling back.
					rollbackContainers[i].RestartCount = tc.expected[i].RestartCount * 2
				}

				rbPatchStr, err := e2epod.ResizeContainerPatch(tc.containers)
				framework.ExpectNoError(err)
				patchAndVerify(rbPatchStr, rollbackContainers, "rollback")
			}

			ginkgo.By("deleting pod")
			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)
			framework.ExpectNoError(podClient.Delete(ctx, newPod.Name, metav1.DeleteOptions{}))
		})
	}
}
@ -963,7 +969,7 @@ func doPodResizeErrorTests(f *framework.Framework) {
		},
	}

	timeouts := framework.NewTimeoutContext()
	timeouts := f.Timeouts

	for idx := range tests {
		tc := tests[idx]
@ -981,12 +987,12 @@ func doPodResizeErrorTests(f *framework.Framework) {
			ginkgo.By("creating pod")
			newPod := podClient.CreateSync(ctx, testPod)

			ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
			ginkgo.By("verifying initial pod resources, and policy are as expected")
			e2epod.VerifyPodResources(newPod, tc.containers)
			e2epod.VerifyPodResizePolicy(newPod, tc.containers)

			ginkgo.By("verifying initial pod status resources and cgroup config are as expected")
			e2epod.VerifyPodStatusResources(newPod, tc.containers)
			framework.ExpectNoError(e2epod.VerifyPodStatusResources(newPod, tc.containers))

			ginkgo.By("patching pod for resize")
			patchedPod, pErr = f.ClientSet.CoreV1().Pods(newPod.Namespace).Patch(ctx, newPod.Name,
@ -1001,10 +1007,8 @@ func doPodResizeErrorTests(f *framework.Framework) {
			ginkgo.By("verifying pod resources after patch")
			e2epod.VerifyPodResources(patchedPod, tc.expected)

			ginkgo.By("verifying pod allocations after patch")
			gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
				WithArguments(patchedPod, tc.expected).
				Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
			ginkgo.By("verifying pod status resources after patch")
			framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPod, tc.expected))

			ginkgo.By("deleting pod")
			podClient.DeleteSync(ctx, newPod.Name, metav1.DeleteOptions{}, timeouts.PodDelete)

@ -19,6 +19,7 @@ package pod
import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"strings"
@ -26,12 +27,11 @@ import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/test/e2e/framework"
	imageutils "k8s.io/kubernetes/test/utils/image"

	"github.com/google/go-cmp/cmp"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)
@ -64,17 +64,42 @@ type ContainerResources struct {
	ExtendedResourceLim string
}

type ContainerAllocations struct {
	CPUAlloc string
	MemAlloc string
	ephStorAlloc string
	ExtendedResourceAlloc string
func (cr *ContainerResources) ResourceRequirements() *v1.ResourceRequirements {
	if cr == nil {
		return nil
	}

	var lim, req v1.ResourceList
	if cr.CPULim != "" || cr.MemLim != "" || cr.EphStorLim != "" {
		lim = make(v1.ResourceList)
	}
	if cr.CPUReq != "" || cr.MemReq != "" || cr.EphStorReq != "" {
		req = make(v1.ResourceList)
	}
	if cr.CPULim != "" {
		lim[v1.ResourceCPU] = resource.MustParse(cr.CPULim)
	}
	if cr.MemLim != "" {
		lim[v1.ResourceMemory] = resource.MustParse(cr.MemLim)
	}
	if cr.EphStorLim != "" {
		lim[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorLim)
	}
	if cr.CPUReq != "" {
		req[v1.ResourceCPU] = resource.MustParse(cr.CPUReq)
	}
	if cr.MemReq != "" {
		req[v1.ResourceMemory] = resource.MustParse(cr.MemReq)
	}
	if cr.EphStorReq != "" {
		req[v1.ResourceEphemeralStorage] = resource.MustParse(cr.EphStorReq)
	}
	return &v1.ResourceRequirements{Limits: lim, Requests: req}
}

type ResizableContainerInfo struct {
	Name string
	Resources *ContainerResources
	Allocations *ContainerAllocations
	CPUPolicy *v1.ResourceResizeRestartPolicy
	MemPolicy *v1.ResourceResizeRestartPolicy
	RestartCount int32
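As a usage note for the ContainerResources.ResourceRequirements helper introduced above, the conversion from a test case's resource strings works roughly as follows. This is an illustrative sketch, not part of the commit; the import path and the concrete values are assumptions:

package resizesketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

func exampleResourceRequirements() {
	cr := &e2epod.ContainerResources{CPUReq: "100m", CPULim: "200m", MemReq: "128Mi", MemLim: "256Mi"}
	rr := cr.ResourceRequirements()

	cpuReq := rr.Requests[v1.ResourceCPU]
	memLim := rr.Limits[v1.ResourceMemory]
	fmt.Println(cpuReq.String(), memLim.String()) // 100m 256Mi

	// A nil receiver yields nil, so test containers without explicit resources
	// pass straight through.
	var unset *e2epod.ContainerResources
	fmt.Println(unset.ResourceRequirements() == nil) // true
}
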
@ -102,51 +127,9 @@ type patchSpec struct {
|
||||
} `json:"spec"`
|
||||
}
|
||||
|
||||
func getTestResourceInfo(tcInfo ResizableContainerInfo) (v1.ResourceRequirements, v1.ResourceList, []v1.ContainerResizePolicy) {
|
||||
var res v1.ResourceRequirements
|
||||
var alloc v1.ResourceList
|
||||
var resizePol []v1.ContainerResizePolicy
|
||||
|
||||
func getTestResourceInfo(tcInfo ResizableContainerInfo) (res v1.ResourceRequirements, resizePol []v1.ContainerResizePolicy) {
|
||||
if tcInfo.Resources != nil {
|
||||
var lim, req v1.ResourceList
|
||||
if tcInfo.Resources.CPULim != "" || tcInfo.Resources.MemLim != "" || tcInfo.Resources.EphStorLim != "" {
|
||||
lim = make(v1.ResourceList)
|
||||
}
|
||||
if tcInfo.Resources.CPUReq != "" || tcInfo.Resources.MemReq != "" || tcInfo.Resources.EphStorReq != "" {
|
||||
req = make(v1.ResourceList)
|
||||
}
|
||||
if tcInfo.Resources.CPULim != "" {
|
||||
lim[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPULim)
|
||||
}
|
||||
if tcInfo.Resources.MemLim != "" {
|
||||
lim[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemLim)
|
||||
}
|
||||
if tcInfo.Resources.EphStorLim != "" {
|
||||
lim[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorLim)
|
||||
}
|
||||
if tcInfo.Resources.CPUReq != "" {
|
||||
req[v1.ResourceCPU] = resource.MustParse(tcInfo.Resources.CPUReq)
|
||||
}
|
||||
if tcInfo.Resources.MemReq != "" {
|
||||
req[v1.ResourceMemory] = resource.MustParse(tcInfo.Resources.MemReq)
|
||||
}
|
||||
if tcInfo.Resources.EphStorReq != "" {
|
||||
req[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Resources.EphStorReq)
|
||||
}
|
||||
res = v1.ResourceRequirements{Limits: lim, Requests: req}
|
||||
}
|
||||
if tcInfo.Allocations != nil {
|
||||
alloc = make(v1.ResourceList)
|
||||
if tcInfo.Allocations.CPUAlloc != "" {
|
||||
alloc[v1.ResourceCPU] = resource.MustParse(tcInfo.Allocations.CPUAlloc)
|
||||
}
|
||||
if tcInfo.Allocations.MemAlloc != "" {
|
||||
alloc[v1.ResourceMemory] = resource.MustParse(tcInfo.Allocations.MemAlloc)
|
||||
}
|
||||
if tcInfo.Allocations.ephStorAlloc != "" {
|
||||
alloc[v1.ResourceEphemeralStorage] = resource.MustParse(tcInfo.Allocations.ephStorAlloc)
|
||||
}
|
||||
|
||||
res = *tcInfo.Resources.ResourceRequirements()
|
||||
}
|
||||
if tcInfo.CPUPolicy != nil {
|
||||
cpuPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceCPU, RestartPolicy: *tcInfo.CPUPolicy}
|
||||
@ -156,7 +139,7 @@ func getTestResourceInfo(tcInfo ResizableContainerInfo) (v1.ResourceRequirements
		memPol := v1.ContainerResizePolicy{ResourceName: v1.ResourceMemory, RestartPolicy: *tcInfo.MemPolicy}
		resizePol = append(resizePol, memPol)
	}
	return res, alloc, resizePol
	return res, resizePol
}

func InitDefaultResizePolicy(containers []ResizableContainerInfo) {
@ -174,9 +157,9 @@ func InitDefaultResizePolicy(containers []ResizableContainerInfo) {
	}
}

func makeResizableContainer(tcInfo ResizableContainerInfo) (v1.Container, v1.ContainerStatus) {
func makeResizableContainer(tcInfo ResizableContainerInfo) v1.Container {
	cmd := "grep Cpus_allowed_list /proc/self/status | cut -f2 && sleep 1d"
	res, alloc, resizePol := getTestResourceInfo(tcInfo)
	res, resizePol := getTestResourceInfo(tcInfo)

	tc := v1.Container{
		Name: tcInfo.Name,
@ -187,18 +170,14 @@ func makeResizableContainer(tcInfo ResizableContainerInfo) (v1.Container, v1.Con
		ResizePolicy: resizePol,
	}

	tcStatus := v1.ContainerStatus{
		Name: tcInfo.Name,
		AllocatedResources: alloc,
	}
	return tc, tcStatus
	return tc
}

func MakePodWithResizableContainers(ns, name, timeStamp string, tcInfo []ResizableContainerInfo) *v1.Pod {
	var testContainers []v1.Container

	for _, ci := range tcInfo {
		tc, _ := makeResizableContainer(ci)
		tc := makeResizableContainer(ci)
		testContainers = append(testContainers, tc)
	}
	pod := &v1.Pod{
@ -223,7 +202,7 @@ func VerifyPodResizePolicy(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
	gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
	for i, wantCtr := range wantCtrs {
		gotCtr := &gotPod.Spec.Containers[i]
		ctr, _ := makeResizableContainer(wantCtr)
		ctr := makeResizableContainer(wantCtr)
		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
		gomega.Expect(gotCtr.ResizePolicy).To(gomega.Equal(ctr.ResizePolicy))
	}
@ -234,45 +213,34 @@ func VerifyPodResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
	gomega.Expect(gotPod.Spec.Containers).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
	for i, wantCtr := range wantCtrs {
		gotCtr := &gotPod.Spec.Containers[i]
		ctr, _ := makeResizableContainer(wantCtr)
		ctr := makeResizableContainer(wantCtr)
		gomega.Expect(gotCtr.Name).To(gomega.Equal(ctr.Name))
		gomega.Expect(gotCtr.Resources).To(gomega.Equal(ctr.Resources))
	}
}

func VerifyPodAllocations(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) error {
	ginkgo.GinkgoHelper()
	gomega.Expect(gotPod.Status.ContainerStatuses).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")

	var errs []error

	if len(gotPod.Status.ContainerStatuses) != len(wantCtrs) {
		return fmt.Errorf("expectation length mismatch: got %d statuses, want %d",
			len(gotPod.Status.ContainerStatuses), len(wantCtrs))
	}
	for i, wantCtr := range wantCtrs {
		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
		if wantCtr.Allocations == nil {
			if wantCtr.Resources != nil {
				alloc := &ContainerAllocations{CPUAlloc: wantCtr.Resources.CPUReq, MemAlloc: wantCtr.Resources.MemReq}
				wantCtr.Allocations = alloc
				defer func() {
					wantCtr.Allocations = nil
				}()
			}
		ctr := makeResizableContainer(wantCtr)
		if gotCtrStatus.Name != ctr.Name {
			errs = append(errs, fmt.Errorf("container status %d name %q != expected name %q", i, gotCtrStatus.Name, ctr.Name))
			continue
		}

		_, ctrStatus := makeResizableContainer(wantCtr)
		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctrStatus.Name))
		if !cmp.Equal(gotCtrStatus.AllocatedResources, ctrStatus.AllocatedResources) {
			return fmt.Errorf("failed to verify Pod allocations, allocated resources not equal to expected")
		if err := framework.Gomega().Expect(*gotCtrStatus.Resources).To(gomega.Equal(ctr.Resources)); err != nil {
			errs = append(errs, fmt.Errorf("container[%s] status resources mismatch: %w", ctr.Name, err))
		}
	}
	return nil
}

func VerifyPodStatusResources(gotPod *v1.Pod, wantCtrs []ResizableContainerInfo) {
	ginkgo.GinkgoHelper()
	gomega.Expect(gotPod.Status.ContainerStatuses).To(gomega.HaveLen(len(wantCtrs)), "number of containers in pod spec should match")
	for i, wantCtr := range wantCtrs {
		gotCtrStatus := &gotPod.Status.ContainerStatuses[i]
		ctr, _ := makeResizableContainer(wantCtr)
		gomega.Expect(gotCtrStatus.Name).To(gomega.Equal(ctr.Name))
		gomega.Expect(ctr.Resources).To(gomega.Equal(*gotCtrStatus.Resources))
	}
	return utilerrors.NewAggregate(errs)
}

// isPodOnCgroupv2Node checks whether the pod is running on cgroupv2 node.
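The rewritten verification helpers above collect every mismatch and return them through utilerrors.NewAggregate instead of failing on the first difference. A minimal, self-contained sketch of that pattern (illustrative names, not part of the commit):

package resizesketch

import (
	"fmt"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// checkAll gathers every mismatch so that a single test failure reports all
// offending containers at once. NewAggregate returns nil when the slice is empty.
func checkAll(got, want map[string]string) error {
	var errs []error
	for name, w := range want {
		if g, ok := got[name]; !ok || g != w {
			errs = append(errs, fmt.Errorf("container %q: got %q, want %q", name, g, w))
		}
	}
	return utilerrors.NewAggregate(errs)
}
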
@ -307,19 +275,21 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
			pod.Namespace, pod.Name, cName, expectedCgValue, cgPath)
		cgValue, _, err := ExecCommandInContainerWithFullOutput(f, pod.Name, cName, "/bin/sh", "-c", cmd)
		if err != nil {
			return fmt.Errorf("failed to find expected value %q in container cgroup %q", expectedCgValue, cgPath)
			return fmt.Errorf("failed to read cgroup %q for container %s: %w", cgPath, cName, err)
		}
		cgValue = strings.Trim(cgValue, "\n")
		if cgValue != expectedCgValue {
			return fmt.Errorf("cgroup value %q not equal to expected %q", cgValue, expectedCgValue)
			return fmt.Errorf("container %s cgroup %q doesn't match expected: got %q want %q",
				cName, cgPath, cgValue, expectedCgValue)
		}
		return nil
	}
	var errs []error
	for _, ci := range tcInfo {
		if ci.Resources == nil {
			continue
		}
		tc, _ := makeResizableContainer(ci)
		tc := makeResizableContainer(ci)
		if tc.Resources.Limits != nil || tc.Resources.Requests != nil {
			var expectedCPUShares int64
			var expectedCPULimitString, expectedMemLimitString string
@ -350,90 +320,85 @@ func VerifyPodContainersCgroupValues(ctx context.Context, f *framework.Framework
				expectedCPUShares = int64(1 + ((expectedCPUShares-2)*9999)/262142)
			}
			if expectedMemLimitString != "0" {
				err := verifyCgroupValue(ci.Name, cgroupMemLimit, expectedMemLimitString)
				if err != nil {
					return err
				}
			}
			err := verifyCgroupValue(ci.Name, cgroupCPULimit, expectedCPULimitString)
			if err != nil {
				return err
			}
			err = verifyCgroupValue(ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10))
			if err != nil {
				return err
				errs = append(errs, verifyCgroupValue(ci.Name, cgroupMemLimit, expectedMemLimitString))
			}
			errs = append(errs, verifyCgroupValue(ci.Name, cgroupCPULimit, expectedCPULimitString))
			errs = append(errs, verifyCgroupValue(ci.Name, cgroupCPURequest, strconv.FormatInt(expectedCPUShares, 10)))
		}
	}
	return nil
	return utilerrors.NewAggregate(errs)
}

func waitForContainerRestart(ctx context.Context, podClient *PodClient, pod *v1.Pod, expectedContainers []ResizableContainerInfo, initialContainers []ResizableContainerInfo, isRollback bool) error {
func verifyContainerRestarts(pod *v1.Pod, expectedContainers []ResizableContainerInfo) error {
	ginkgo.GinkgoHelper()
	var restartContainersExpected []string

	restartContainers := expectedContainers
	// if we're rolling back, extract restart counts from test case "expected" containers
	if isRollback {
		restartContainers = initialContainers
	expectContainerRestarts := map[string]int32{}
	for _, ci := range expectedContainers {
		expectContainerRestarts[ci.Name] = ci.RestartCount
	}

	for _, ci := range restartContainers {
		if ci.RestartCount > 0 {
			restartContainersExpected = append(restartContainersExpected, ci.Name)
	errs := []error{}
	for _, cs := range pod.Status.ContainerStatuses {
		expectedRestarts := expectContainerRestarts[cs.Name]
		if cs.RestartCount != expectedRestarts {
			errs = append(errs, fmt.Errorf("unexpected number of restarts for container %s: got %d, want %d", cs.Name, cs.RestartCount, expectedRestarts))
		}
	}
	if len(restartContainersExpected) == 0 {
		return nil
	}

	pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	restartedContainersCount := 0
	for _, cName := range restartContainersExpected {
		cs, _ := podutil.GetContainerStatus(pod.Status.ContainerStatuses, cName)
		if cs.RestartCount < 1 {
			break
		}
		restartedContainersCount++
	}
	if restartedContainersCount == len(restartContainersExpected) {
		return nil
	}
	if restartedContainersCount > len(restartContainersExpected) {
		return fmt.Errorf("more container restarts than expected")
	} else {
		return fmt.Errorf("less container restarts than expected")
	}
	return utilerrors.NewAggregate(errs)
}

func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod, patchedPod *v1.Pod, expectedContainers []ResizableContainerInfo, initialContainers []ResizableContainerInfo, isRollback bool) *v1.Pod {
func WaitForPodResizeActuation(ctx context.Context, f *framework.Framework, podClient *PodClient, pod *v1.Pod) *v1.Pod {
	ginkgo.GinkgoHelper()
	var resizedPod *v1.Pod
	var pErr error
	timeouts := framework.NewTimeoutContext()
	// Wait for container restart
	gomega.Eventually(ctx, waitForContainerRestart, timeouts.PodStartShort, timeouts.Poll).
		WithArguments(podClient, pod, expectedContainers, initialContainers, isRollback).
		ShouldNot(gomega.HaveOccurred(), "failed waiting for expected container restart")
	// Verify Pod Containers Cgroup Values
	gomega.Eventually(ctx, VerifyPodContainersCgroupValues, timeouts.PodStartShort, timeouts.Poll).
		WithArguments(f, patchedPod, expectedContainers).
		ShouldNot(gomega.HaveOccurred(), "failed to verify container cgroup values to match expected")
	// Wait for pod resource allocations to equal expected values after resize
	gomega.Eventually(ctx, func() error {
		resizedPod, pErr = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
		if pErr != nil {
			return pErr
		}
		return VerifyPodAllocations(resizedPod, expectedContainers)
	}, timeouts.PodStartShort, timeouts.Poll).
		ShouldNot(gomega.HaveOccurred(), "timed out waiting for pod resource allocation values to match expected")
	// Wait for resize to complete.
	framework.ExpectNoError(WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "resize status cleared", f.Timeouts.PodStart,
		func(pod *v1.Pod) (bool, error) {
			if pod.Status.Resize == v1.PodResizeStatusInfeasible {
				// This is a terminal resize state
				return false, fmt.Errorf("resize is infeasible")
			}
			return pod.Status.Resize == "", nil
		}), "pod should finish resizing")

	resizedPod, err := framework.GetObject(podClient.Get, pod.Name, metav1.GetOptions{})(ctx)
	framework.ExpectNoError(err, "failed to get resized pod")
	return resizedPod
}

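The reworked WaitForPodResizeActuation above no longer re-verifies allocations while polling; it waits for pod.Status.Resize to clear and treats Infeasible as terminal. A standalone sketch of that predicate (illustrative, not part of the commit):

package resizesketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// resizeDone reports whether a pod's resize has been fully actuated.
// Infeasible is treated as terminal, since waiting longer can never succeed.
func resizeDone(pod *v1.Pod) (bool, error) {
	if pod.Status.Resize == v1.PodResizeStatusInfeasible {
		return false, fmt.Errorf("resize is infeasible")
	}
	return pod.Status.Resize == "", nil
}
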
func ExpectPodResized(ctx context.Context, f *framework.Framework, resizedPod *v1.Pod, expectedContainers []ResizableContainerInfo) {
	ginkgo.GinkgoHelper()

	// Put each error on a new line for readability.
	formatErrors := func(err error) error {
		var agg utilerrors.Aggregate
		if !errors.As(err, &agg) {
			return err
		}

		errStrings := make([]string, len(agg.Errors()))
		for i, err := range agg.Errors() {
			errStrings[i] = err.Error()
		}
		return fmt.Errorf("[\n%s\n]", strings.Join(errStrings, ",\n"))
	}
	// Verify Pod Containers Cgroup Values
	var errs []error
	if cgroupErrs := VerifyPodContainersCgroupValues(ctx, f, resizedPod, expectedContainers); cgroupErrs != nil {
		errs = append(errs, fmt.Errorf("container cgroup values don't match expected: %w", formatErrors(cgroupErrs)))
	}
	if resourceErrs := VerifyPodStatusResources(resizedPod, expectedContainers); resourceErrs != nil {
		errs = append(errs, fmt.Errorf("container status resources don't match expected: %w", formatErrors(resourceErrs)))
	}
	if restartErrs := verifyContainerRestarts(resizedPod, expectedContainers); restartErrs != nil {
		errs = append(errs, fmt.Errorf("container restart counts don't match expected: %w", formatErrors(restartErrs)))
	}

	if len(errs) > 0 {
		resizedPod.ManagedFields = nil // Suppress managed fields in error output.
		framework.ExpectNoError(formatErrors(utilerrors.NewAggregate(errs)),
			"Verifying pod resources resize state. Pod: %s", framework.PrettyPrintJSON(resizedPod))
	}
}

// ResizeContainerPatch generates a patch string to resize the pod container.
func ResizeContainerPatch(containers []ResizableContainerInfo) (string, error) {
	var patch patchSpec

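ResizeContainerPatch (whose body begins above) builds the patch string that the tests apply with types.StrategicMergePatchType. For orientation, a hand-written patch of the same shape might look like this; the container name and quantities are illustrative only:

package resizesketch

// resizePatch is the kind of strategic-merge patch body these tests send:
// only the containers being resized and the resource fields being changed
// need to appear.
const resizePatch = `{
  "spec": {
    "containers": [
      {
        "name": "c1",
        "resources": {
          "requests": {"cpu": "200m", "memory": "256Mi"},
          "limits":   {"cpu": "400m", "memory": "512Mi"}
        }
      }
    ]
  }
}`
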
@ -38,8 +38,6 @@ import (
)

func doPodResizeResourceQuotaTests(f *framework.Framework) {
	timeouts := framework.NewTimeoutContext()

	ginkgo.It("pod-resize-resource-quota-test", func(ctx context.Context) {
		podClient := e2epod.NewPodClient(f)
		resourceQuota := v1.ResourceQuota{
@ -92,7 +90,7 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
		newPod1 := podClient.CreateSync(ctx, testPod1)
		newPod2 := podClient.CreateSync(ctx, testPod2)

		ginkgo.By("verifying initial pod resources, allocations, and policy are as expected")
		ginkgo.By("verifying initial pod resources, and policy are as expected")
		e2epod.VerifyPodResources(newPod1, containers)

		ginkgo.By("patching pod for resize within resource quota")
@ -102,23 +100,14 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {

		ginkgo.By("verifying pod patched for resize within resource quota")
		e2epod.VerifyPodResources(patchedPod, expected)
		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
			WithArguments(patchedPod, containers).
			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")

		ginkgo.By("waiting for resize to be actuated")
		resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod1, patchedPod, expected, containers, false)
		ginkgo.By("verifying pod container's cgroup values after resize")
		framework.ExpectNoError(e2epod.VerifyPodContainersCgroupValues(ctx, f, resizedPod, expected))
		resizedPod := e2epod.WaitForPodResizeActuation(ctx, f, podClient, newPod1)
		e2epod.ExpectPodResized(ctx, f, resizedPod, expected)

		ginkgo.By("verifying pod resources after resize")
		e2epod.VerifyPodResources(resizedPod, expected)

		ginkgo.By("verifying pod allocations after resize")
		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
			WithArguments(resizedPod, expected).
			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")

		ginkgo.By("patching pod for resize with memory exceeding resource quota")
		_, pErrExceedMemory := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(ctx,
			resizedPod.Name, types.StrategicMergePatchType, []byte(patchStringExceedMemory), metav1.PatchOptions{})
@ -129,9 +118,7 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
		patchedPodExceedMemory, pErrEx2 := podClient.Get(ctx, resizedPod.Name, metav1.GetOptions{})
		framework.ExpectNoError(pErrEx2, "failed to get pod post exceed memory resize")
		e2epod.VerifyPodResources(patchedPodExceedMemory, expected)
		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
			WithArguments(patchedPodExceedMemory, expected).
			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
		framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPodExceedMemory, expected))

		ginkgo.By(fmt.Sprintf("patching pod %s for resize with CPU exceeding resource quota", resizedPod.Name))
		_, pErrExceedCPU := f.ClientSet.CoreV1().Pods(resizedPod.Namespace).Patch(ctx,
@ -143,9 +130,7 @@ func doPodResizeResourceQuotaTests(f *framework.Framework) {
		patchedPodExceedCPU, pErrEx1 := podClient.Get(ctx, resizedPod.Name, metav1.GetOptions{})
		framework.ExpectNoError(pErrEx1, "failed to get pod post exceed CPU resize")
		e2epod.VerifyPodResources(patchedPodExceedCPU, expected)
		gomega.Eventually(ctx, e2epod.VerifyPodAllocations, timeouts.PodStartShort, timeouts.Poll).
			WithArguments(patchedPodExceedCPU, expected).
			Should(gomega.BeNil(), "failed to verify Pod allocations for patchedPod")
		framework.ExpectNoError(e2epod.VerifyPodStatusResources(patchedPodExceedCPU, expected))

		ginkgo.By("deleting pods")
		delErr1 := e2epod.DeletePodWithWait(ctx, f.ClientSet, newPod1)

@ -500,6 +500,12 @@
    lockToDefault: false
    preRelease: Alpha
    version: "1.27"
- name: InPlacePodVerticalScalingAllocatedStatus
  versionedSpecs:
  - default: false
    lockToDefault: false
    preRelease: Alpha
    version: "1.32"
- name: InTreePluginPortworxUnregister
  versionedSpecs:
  - default: false