Move container status AllocatedResources behind a separate feature gate
commit 0f0e27d226 (parent 3f5d0ee2cf)
@@ -828,18 +828,29 @@ func dropDisabledPodStatusFields(podStatus, oldPodStatus *api.PodStatus, podSpec
 	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
-		// Drop Resize, AllocatedResources, and Resources fields
-		dropResourcesFields := func(csl []api.ContainerStatus) {
+		// Drop Resize and Resources fields
+		dropResourcesField := func(csl []api.ContainerStatus) {
 			for i := range csl {
-				csl[i].AllocatedResources = nil
 				csl[i].Resources = nil
 			}
 		}
-		dropResourcesFields(podStatus.ContainerStatuses)
-		dropResourcesFields(podStatus.InitContainerStatuses)
-		dropResourcesFields(podStatus.EphemeralContainerStatuses)
+		dropResourcesField(podStatus.ContainerStatuses)
+		dropResourcesField(podStatus.InitContainerStatuses)
+		dropResourcesField(podStatus.EphemeralContainerStatuses)
 		podStatus.Resize = ""
 	}
+	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) ||
+		!utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+		// Drop AllocatedResources field
+		dropAllocatedResourcesField := func(csl []api.ContainerStatus) {
+			for i := range csl {
+				csl[i].AllocatedResources = nil
+			}
+		}
+		dropAllocatedResourcesField(podStatus.ContainerStatuses)
+		dropAllocatedResourcesField(podStatus.InitContainerStatuses)
+		dropAllocatedResourcesField(podStatus.EphemeralContainerStatuses)
+	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) && !dynamicResourceAllocationInUse(oldPodSpec) {
 		podStatus.ResourceClaimStatuses = nil
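Net effect of the hunk above: status.Resources and status.Resize remain gated only on InPlacePodVerticalScaling (with the existing in-use escape hatch for old pods, elided here), while status.AllocatedResources now survives only when InPlacePodVerticalScaling AND the new InPlacePodVerticalScalingAllocatedStatus gate are both enabled. A minimal, self-contained sketch of that rule, with plain booleans and a toy struct standing in for the real feature gates and api.ContainerStatus (hypothetical names, not the Kubernetes implementation):

package main

import "fmt"

// containerStatus is a toy stand-in for api.ContainerStatus.
type containerStatus struct {
	Resources          string
	AllocatedResources string
}

// dropDisabledStatusFields mirrors the gating rule from the hunk above.
func dropDisabledStatusFields(csl []containerStatus, ippvs, allocatedStatus bool) {
	if !ippvs {
		// Resources (and Resize, not modeled here) are gated on
		// InPlacePodVerticalScaling alone.
		for i := range csl {
			csl[i].Resources = ""
		}
	}
	if !ippvs || !allocatedStatus {
		// AllocatedResources needs BOTH gates to be on.
		for i := range csl {
			csl[i].AllocatedResources = ""
		}
	}
}

func main() {
	csl := []containerStatus{{Resources: "cpu=500m", AllocatedResources: "cpu=500m"}}
	dropDisabledStatusFields(csl, true, false) // IPPVS on, AllocatedStatus off
	fmt.Printf("%+v\n", csl[0])                // Resources kept, AllocatedResources dropped
}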
@@ -2635,56 +2635,69 @@ func TestDropInPlacePodVerticalScaling(t *testing.T) {
 		},
 	}
 
-	for _, enabled := range []bool{true, false} {
-		for _, oldPodInfo := range podInfo {
-			for _, newPodInfo := range podInfo {
-				oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
-				newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
-				if newPod == nil {
-					continue
-				}
-
-				t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
-					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, enabled)
-
-					var oldPodSpec *api.PodSpec
-					var oldPodStatus *api.PodStatus
-					if oldPod != nil {
-						oldPodSpec = &oldPod.Spec
-						oldPodStatus = &oldPod.Status
-					}
-					dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
-					dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
-
-					// old pod should never be changed
-					if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
-						t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
-					}
-
-					switch {
-					case enabled || oldPodHasInPlaceVerticalScaling:
-						// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
-						}
-					case newPodHasInPlaceVerticalScaling:
-						// new pod should be changed
-						if reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod was not changed")
-						}
-						// new pod should not have ResizePolicy
-						if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
-							t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
-						}
-					default:
-						// new pod should not need to be changed
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
-						}
-					}
-				})
-			}
-		}
-	}
+	for _, ippvsEnabled := range []bool{true, false} {
+		t.Run(fmt.Sprintf("InPlacePodVerticalScaling=%t", ippvsEnabled), func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, ippvsEnabled)
+
+			for _, allocatedStatusEnabled := range []bool{true, false} {
+				t.Run(fmt.Sprintf("AllocatedStatus=%t", allocatedStatusEnabled), func(t *testing.T) {
+					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, allocatedStatusEnabled)
+
+					for _, oldPodInfo := range podInfo {
+						for _, newPodInfo := range podInfo {
+							oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
+							newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
+							if newPod == nil {
+								continue
+							}
+
+							t.Run(fmt.Sprintf("old pod %v, new pod %v", oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
+								var oldPodSpec *api.PodSpec
+								var oldPodStatus *api.PodStatus
+								if oldPod != nil {
+									oldPodSpec = &oldPod.Spec
+									oldPodStatus = &oldPod.Status
+								}
+								dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
+								dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
+
+								// old pod should never be changed
+								if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
+									t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
+								}
+
+								switch {
+								case ippvsEnabled || oldPodHasInPlaceVerticalScaling:
+									// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
+									expected := newPodInfo.pod()
+									if !ippvsEnabled || !allocatedStatusEnabled {
+										expected.Status.ContainerStatuses[0].AllocatedResources = nil
+									}
+									if !reflect.DeepEqual(newPod, expected) {
+										t.Errorf("new pod changed: %v", cmp.Diff(newPod, expected))
+									}
+								case newPodHasInPlaceVerticalScaling:
+									// new pod should be changed
+									if reflect.DeepEqual(newPod, newPodInfo.pod()) {
+										t.Errorf("new pod was not changed")
+									}
+									// new pod should not have ResizePolicy
+									if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
+										t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
+									}
+								default:
+									// new pod should not need to be changed
+									if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
+										t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
+									}
+								}
+							})
+						}
+					}
+				})
+			}
+		})
+	}
 }
@@ -264,6 +264,19 @@ const (
 	// deletion ordering.
 	HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
 
+	// owner: @vinaykul,@tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables In-Place Pod Vertical Scaling
+	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
+
+	// owner: @tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables the AllocatedResources field in container status. This feature requires
+	// InPlacePodVerticalScaling also be enabled.
+	InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus"
+
 	// owner: @trierra
 	//
 	// Disables the Portworx in-tree driver.
@@ -733,12 +746,6 @@ const (
 	// Initial implementation focused on ReadWriteOncePod volumes.
 	SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod"
 
-	// owner: @vinaykul
-	// kep: http://kep.k8s.io/1287
-	//
-	// Enables In-Place Pod Vertical Scaling
-	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
-
 	// owner: @Sh4d1,@RyanAoh,@rikatz
 	// kep: http://kep.k8s.io/1860
 	// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
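As the new gate's comment notes, InPlacePodVerticalScalingAllocatedStatus only has an effect when InPlacePodVerticalScaling is also enabled. On a test cluster, both would be switched on with the standard --feature-gates flag on the API server and the kubelet; an illustrative invocation (not part of this commit):

kube-apiserver --feature-gates=InPlacePodVerticalScaling=true,InPlacePodVerticalScalingAllocatedStatus=true ...
kubelet --feature-gates=InPlacePodVerticalScaling=true,InPlacePodVerticalScalingAllocatedStatus=true ...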
@@ -391,6 +391,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
 	},
 
+	InPlacePodVerticalScalingAllocatedStatus: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
 	InTreePluginPortworxUnregister: {
 		{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
 	},
@@ -2095,18 +2095,14 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			}
 		}
 	}
-	container := kubecontainer.GetContainerSpec(pod, cName)
-
-	// Always set the status to the latest allocated resources, even if it differs from the
-	// allocation used by the current sync loop.
 	alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName)
-	if found {
-		status.AllocatedResources = alloc.Requests
-	} else if !(container.Resources.Requests == nil && container.Resources.Limits == nil) {
-		// This case is expected for ephemeral containers.
-		if oldStatusFound {
-			status.AllocatedResources = oldStatus.AllocatedResources
-		}
+	if !found {
+		// This case is expected for non-resizeable containers.
+		// Don't set status.Resources in this case.
+		return nil
 	}
 	if oldStatus.Resources == nil {
 		oldStatus.Resources = &v1.ResourceRequirements{}

@@ -2342,6 +2338,12 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			if status.State.Running != nil {
 				status.Resources = convertContainerStatusResources(cName, status, cStatus, oldStatuses)
 			}
+
+			if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+				if alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName); found {
+					status.AllocatedResources = alloc.Requests
+				}
+			}
 		}
 		if utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) {
 			status.User = convertContainerStatusUser(cStatus)
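With both gates enabled, the kubelet copies the allocation recorded by its status manager into status.AllocatedResources for running containers. The field serializes as allocatedResources in the v1 API, so on a cluster with the gates on it can be inspected with a jsonpath query (pod name and container index are placeholders):

kubectl get pod <pod-name> -o jsonpath='{.status.containerStatuses[0].allocatedResources}'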
@@ -4543,6 +4543,7 @@ func TestConvertToAPIContainerStatusesDataRace(t *testing.T) {
 
 func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
 
 	nowTime := time.Now()
 	testContainerName := "ctr0"
 	testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
@@ -4843,26 +4844,41 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 			},
 		},
 	} {
-		tPod := testPod.DeepCopy()
-		tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
-		for i := range tPod.Spec.Containers {
-			if tc.Resources != nil {
-				tPod.Spec.Containers[i].Resources = tc.Resources[i]
-			}
-			kubelet.statusManager.SetPodAllocation(tPod)
-			if tc.Resources != nil {
-				tPod.Status.ContainerStatuses[i].AllocatedResources = tc.Resources[i].Requests
-				testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
-					MemoryLimit: tc.Resources[i].Limits.Memory(),
-					CPULimit:    tc.Resources[i].Limits.Cpu(),
-					CPURequest:  tc.Resources[i].Requests.Cpu(),
-				}
-			}
-		}
-
-		t.Logf("TestCase: %q", tdesc)
-		cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
-		assert.Equal(t, tc.Expected, cStatuses)
+		t.Run(tdesc, func(t *testing.T) {
+			tPod := testPod.DeepCopy()
+			tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
+			for i := range tPod.Spec.Containers {
+				if tc.Resources != nil {
+					tPod.Spec.Containers[i].Resources = tc.Resources[i]
+				}
+				kubelet.statusManager.SetPodAllocation(tPod)
+				if tc.Resources != nil {
+					testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
+						MemoryLimit: tc.Resources[i].Limits.Memory(),
+						CPULimit:    tc.Resources[i].Limits.Cpu(),
+						CPURequest:  tc.Resources[i].Requests.Cpu(),
+					}
+				}
+			}
+
+			for _, enableAllocatedStatus := range []bool{true, false} {
+				t.Run(fmt.Sprintf("AllocatedStatus=%t", enableAllocatedStatus), func(t *testing.T) {
+					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, enableAllocatedStatus)
+
+					expected := tc.Expected
+					if !enableAllocatedStatus {
+						for i, status := range expected {
+							noAllocated := *status.DeepCopy()
+							noAllocated.AllocatedResources = nil
+							expected[i] = noAllocated
+						}
+					}
+
+					cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
+					assert.Equal(t, expected, cStatuses)
+				})
+			}
+		})
 	}
 }
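Both test changes follow the same pattern: each gate combination runs in its own t.Run subtest so that SetFeatureGateDuringTest can restore the gate when the subtest ends. A reduced sketch of the pattern, assuming the k8s.io/component-base test helper already imported by these files:

for _, enabled := range []bool{true, false} {
	t.Run(fmt.Sprintf("AllocatedStatus=%t", enabled), func(t *testing.T) {
		// Flips the process-global gate for the duration of this subtest and
		// registers a cleanup to restore it; because the gate is global,
		// such subtests must not use t.Parallel().
		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
			features.InPlacePodVerticalScalingAllocatedStatus, enabled)

		// ... exercise code paths that consult the gate ...
	})
}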