Move container status AllocatedResources behind a separate feature gate

commit 0f0e27d226 (parent 3f5d0ee2cf)
mirror of https://github.com/k3s-io/kubernetes.git
@@ -828,18 +828,29 @@ func dropDisabledPodStatusFields(podStatus, oldPodStatus *api.PodStatus, podSpec
 	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) && !inPlacePodVerticalScalingInUse(oldPodSpec) {
-		// Drop Resize, AllocatedResources, and Resources fields
-		dropResourcesFields := func(csl []api.ContainerStatus) {
+		// Drop Resize and Resources fields
+		dropResourcesField := func(csl []api.ContainerStatus) {
 			for i := range csl {
-				csl[i].AllocatedResources = nil
 				csl[i].Resources = nil
 			}
 		}
-		dropResourcesFields(podStatus.ContainerStatuses)
-		dropResourcesFields(podStatus.InitContainerStatuses)
-		dropResourcesFields(podStatus.EphemeralContainerStatuses)
+		dropResourcesField(podStatus.ContainerStatuses)
+		dropResourcesField(podStatus.InitContainerStatuses)
+		dropResourcesField(podStatus.EphemeralContainerStatuses)
 		podStatus.Resize = ""
 	}
+	if !utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) ||
+		!utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+		// Drop AllocatedResources field
+		dropAllocatedResourcesField := func(csl []api.ContainerStatus) {
+			for i := range csl {
+				csl[i].AllocatedResources = nil
+			}
+		}
+		dropAllocatedResourcesField(podStatus.ContainerStatuses)
+		dropAllocatedResourcesField(podStatus.InitContainerStatuses)
+		dropAllocatedResourcesField(podStatus.EphemeralContainerStatuses)
+	}
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) && !dynamicResourceAllocationInUse(oldPodSpec) {
 		podStatus.ResourceClaimStatuses = nil
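Note: the net effect of this first hunk is a gating split — status.Resources and status.Resize remain tied to InPlacePodVerticalScaling alone, while status.AllocatedResources is now cleared unless InPlacePodVerticalScalingAllocatedStatus is also enabled. A minimal standalone sketch of that split, using a hypothetical ContainerStatus type (not the real api.ContainerStatus) and omitting the "already in use" carve-out the real code applies via inPlacePodVerticalScalingInUse:

package main

import "fmt"

// ContainerStatus is a stand-in for the real API type.
type ContainerStatus struct {
	Resources          *string // stand-in for *api.ResourceRequirements
	AllocatedResources *string // stand-in for api.ResourceList
}

// dropDisabledStatusFields mirrors the two independent checks above.
func dropDisabledStatusFields(cs *ContainerStatus, ippvs, allocatedStatus bool) {
	if !ippvs {
		cs.Resources = nil // Resize would be cleared here too
	}
	if !ippvs || !allocatedStatus {
		cs.AllocatedResources = nil
	}
}

func main() {
	res, alloc := "resources", "allocated"
	cs := ContainerStatus{Resources: &res, AllocatedResources: &alloc}
	dropDisabledStatusFields(&cs, true, false) // IPPVS on, AllocatedStatus off
	fmt.Println(cs.Resources != nil, cs.AllocatedResources != nil) // true false
}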
@@ -2635,56 +2635,69 @@ func TestDropInPlacePodVerticalScaling(t *testing.T) {
 		},
 	}
 
-	for _, enabled := range []bool{true, false} {
-		for _, oldPodInfo := range podInfo {
-			for _, newPodInfo := range podInfo {
-				oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
-				newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
-				if newPod == nil {
-					continue
-				}
-
-				t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
-					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, enabled)
-
-					var oldPodSpec *api.PodSpec
-					var oldPodStatus *api.PodStatus
-					if oldPod != nil {
-						oldPodSpec = &oldPod.Spec
-						oldPodStatus = &oldPod.Status
-					}
-					dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
-					dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
-
-					// old pod should never be changed
-					if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
-						t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
-					}
-
-					switch {
-					case enabled || oldPodHasInPlaceVerticalScaling:
-						// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
-						}
-					case newPodHasInPlaceVerticalScaling:
-						// new pod should be changed
-						if reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod was not changed")
-						}
-						// new pod should not have ResizePolicy
-						if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
-							t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
-						}
-					default:
-						// new pod should not need to be changed
-						if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
-							t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
-						}
-					}
-				})
-			}
-		}
-	}
+	for _, ippvsEnabled := range []bool{true, false} {
+		t.Run(fmt.Sprintf("InPlacePodVerticalScaling=%t", ippvsEnabled), func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, ippvsEnabled)
+
+			for _, allocatedStatusEnabled := range []bool{true, false} {
+				t.Run(fmt.Sprintf("AllocatedStatus=%t", allocatedStatusEnabled), func(t *testing.T) {
+					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, allocatedStatusEnabled)
+
+					for _, oldPodInfo := range podInfo {
+						for _, newPodInfo := range podInfo {
+							oldPodHasInPlaceVerticalScaling, oldPod := oldPodInfo.hasInPlaceVerticalScaling, oldPodInfo.pod()
+							newPodHasInPlaceVerticalScaling, newPod := newPodInfo.hasInPlaceVerticalScaling, newPodInfo.pod()
+							if newPod == nil {
+								continue
+							}
+
+							t.Run(fmt.Sprintf("old pod %v, new pod %v", oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
+								var oldPodSpec *api.PodSpec
+								var oldPodStatus *api.PodStatus
+								if oldPod != nil {
+									oldPodSpec = &oldPod.Spec
+									oldPodStatus = &oldPod.Status
+								}
+								dropDisabledFields(&newPod.Spec, nil, oldPodSpec, nil)
+								dropDisabledPodStatusFields(&newPod.Status, oldPodStatus, &newPod.Spec, oldPodSpec)
+
+								// old pod should never be changed
+								if !reflect.DeepEqual(oldPod, oldPodInfo.pod()) {
+									t.Errorf("old pod changed: %v", cmp.Diff(oldPod, oldPodInfo.pod()))
+								}
+
+								switch {
+								case ippvsEnabled || oldPodHasInPlaceVerticalScaling:
+									// new pod shouldn't change if feature enabled or if old pod has ResizePolicy set
+									expected := newPodInfo.pod()
+									if !ippvsEnabled || !allocatedStatusEnabled {
+										expected.Status.ContainerStatuses[0].AllocatedResources = nil
+									}
+									if !reflect.DeepEqual(newPod, expected) {
+										t.Errorf("new pod changed: %v", cmp.Diff(newPod, expected))
+									}
+								case newPodHasInPlaceVerticalScaling:
+									// new pod should be changed
+									if reflect.DeepEqual(newPod, newPodInfo.pod()) {
+										t.Errorf("new pod was not changed")
+									}
+									// new pod should not have ResizePolicy
+									if !reflect.DeepEqual(newPod, podWithoutInPlaceVerticalScaling()) {
+										t.Errorf("new pod has ResizePolicy: %v", cmp.Diff(newPod, podWithoutInPlaceVerticalScaling()))
+									}
+								default:
+									// new pod should not need to be changed
+									if !reflect.DeepEqual(newPod, newPodInfo.pod()) {
+										t.Errorf("new pod changed: %v", cmp.Diff(newPod, newPodInfo.pod()))
+									}
+								}
+							})
+						}
+					}
+				})
+			}
+		})
+	}
 }
 
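Note: the restructuring above hoists feature-gate setup out of the innermost loop into nested t.Run scopes, so each gate combination is configured once and the whole pod table runs under it. The skeleton of that pattern, as a hedged sketch (TestGateMatrix and the package name are hypothetical; the imports are the ones this test file already uses):

package example // hypothetical

import (
	"fmt"
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestGateMatrix(t *testing.T) {
	for _, ippvs := range []bool{true, false} {
		t.Run(fmt.Sprintf("InPlacePodVerticalScaling=%t", ippvs), func(t *testing.T) {
			// SetFeatureGateDuringTest restores the previous value on cleanup.
			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, ippvs)
			for _, allocated := range []bool{true, false} {
				t.Run(fmt.Sprintf("AllocatedStatus=%t", allocated), func(t *testing.T) {
					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, allocated)
					// ... run the table-driven assertions under this combination ...
				})
			}
		})
	}
}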
@@ -264,6 +264,19 @@ const (
 	// deletion ordering.
 	HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy"
 
+	// owner: @vinaykul,@tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables In-Place Pod Vertical Scaling
+	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
+
+	// owner: @tallclair
+	// kep: http://kep.k8s.io/1287
+	//
+	// Enables the AllocatedResources field in container status. This feature requires
+	// InPlacePodVerticalScaling also be enabled.
+	InPlacePodVerticalScalingAllocatedStatus featuregate.Feature = "InPlacePodVerticalScalingAllocatedStatus"
+
 	// owner: @trierra
 	//
 	// Disables the Portworx in-tree driver.
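Note: per the new doc comment, InPlacePodVerticalScalingAllocatedStatus only has an effect when InPlacePodVerticalScaling is also enabled — which is why the first hunk above drops AllocatedResources when either gate is off. A consumer surfacing the field would check the conjunction; a hedged sketch (allocatedStatusEnabled is a hypothetical helper, not part of this commit):

package example // hypothetical

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// allocatedStatusEnabled reports whether the AllocatedResources field in
// container status should be populated: both gates must be on.
func allocatedStatusEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) &&
		utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus)
}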
@@ -733,12 +746,6 @@ const (
 	// Initial implementation focused on ReadWriteOncePod volumes.
 	SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod"
 
-	// owner: @vinaykul
-	// kep: http://kep.k8s.io/1287
-	//
-	// Enables In-Place Pod Vertical Scaling
-	InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
-
 	// owner: @Sh4d1,@RyanAoh,@rikatz
 	// kep: http://kep.k8s.io/1860
 	// LoadBalancerIPMode enables the IPMode field in the LoadBalancerIngress status of a Service
@@ -391,6 +391,10 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 		{Version: version.MustParse("1.27"), Default: false, PreRelease: featuregate.Alpha},
 	},
 
+	InPlacePodVerticalScalingAllocatedStatus: {
+		{Version: version.MustParse("1.32"), Default: false, PreRelease: featuregate.Alpha},
+	},
+
 	InTreePluginPortworxUnregister: {
 		{Version: version.MustParse("1.23"), Default: false, PreRelease: featuregate.Alpha},
 	},
@@ -2095,18 +2095,14 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 			}
 		}
 	}
-	container := kubecontainer.GetContainerSpec(pod, cName)
 
 	// Always set the status to the latest allocated resources, even if it differs from the
 	// allocation used by the current sync loop.
 	alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName)
-	if found {
-		status.AllocatedResources = alloc.Requests
-	} else if !(container.Resources.Requests == nil && container.Resources.Limits == nil) {
-		// This case is expected for ephemeral containers.
-		if oldStatusFound {
-			status.AllocatedResources = oldStatus.AllocatedResources
-		}
+	if !found {
+		// This case is expected for non-resizeable containers.
+		// Don't set status.Resources in this case.
+		return nil
 	}
 	if oldStatus.Resources == nil {
 		oldStatus.Resources = &v1.ResourceRequirements{}
@@ -2342,6 +2338,12 @@ func (kl *Kubelet) convertToAPIContainerStatuses(pod *v1.Pod, podStatus *kubecon
 		if status.State.Running != nil {
 			status.Resources = convertContainerStatusResources(cName, status, cStatus, oldStatuses)
 		}
+
+		if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScalingAllocatedStatus) {
+			if alloc, found := kl.statusManager.GetContainerResourceAllocation(string(pod.UID), cName); found {
+				status.AllocatedResources = alloc.Requests
+			}
+		}
 	}
 	if utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy) {
 		status.User = convertContainerStatusUser(cStatus)
@@ -4543,6 +4543,7 @@ func TestConvertToAPIContainerStatusesDataRace(t *testing.T) {
 
 func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScaling, true)
+
 	nowTime := time.Now()
 	testContainerName := "ctr0"
 	testContainerID := kubecontainer.ContainerID{Type: "test", ID: testContainerName}
@@ -4843,26 +4844,41 @@ func TestConvertToAPIContainerStatusesForResources(t *testing.T) {
 			},
 		},
 	} {
-		tPod := testPod.DeepCopy()
-		tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
-		for i := range tPod.Spec.Containers {
-			if tc.Resources != nil {
-				tPod.Spec.Containers[i].Resources = tc.Resources[i]
-			}
-			kubelet.statusManager.SetPodAllocation(tPod)
-			if tc.Resources != nil {
-				tPod.Status.ContainerStatuses[i].AllocatedResources = tc.Resources[i].Requests
+		t.Run(tdesc, func(t *testing.T) {
+			tPod := testPod.DeepCopy()
+			tPod.Name = fmt.Sprintf("%s-%d", testPod.Name, idx)
+			for i := range tPod.Spec.Containers {
+				if tc.Resources != nil {
+					tPod.Spec.Containers[i].Resources = tc.Resources[i]
+				}
+				kubelet.statusManager.SetPodAllocation(tPod)
+				if tc.Resources != nil {
 				testPodStatus.ContainerStatuses[i].Resources = &kubecontainer.ContainerResources{
 					MemoryLimit: tc.Resources[i].Limits.Memory(),
 					CPULimit:    tc.Resources[i].Limits.Cpu(),
 					CPURequest:  tc.Resources[i].Requests.Cpu(),
+					}
 				}
 			}
-		}
 
-		t.Logf("TestCase: %q", tdesc)
-		cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
-		assert.Equal(t, tc.Expected, cStatuses)
+			for _, enableAllocatedStatus := range []bool{true, false} {
+				t.Run(fmt.Sprintf("AllocatedStatus=%t", enableAllocatedStatus), func(t *testing.T) {
+					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.InPlacePodVerticalScalingAllocatedStatus, enableAllocatedStatus)
+
+					expected := tc.Expected
+					if !enableAllocatedStatus {
+						for i, status := range expected {
+							noAllocated := *status.DeepCopy()
+							noAllocated.AllocatedResources = nil
+							expected[i] = noAllocated
+						}
+					}
+
+					cStatuses := kubelet.convertToAPIContainerStatuses(tPod, testPodStatus, tc.OldStatus, tPod.Spec.Containers, false, false)
+					assert.Equal(t, expected, cStatuses)
+				})
+			}
+		})
 	}
 }
 
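Note: rather than duplicating fixtures, the reworked test derives the gate-off expectation by stripping AllocatedResources from the expected statuses. The same idea as a standalone helper, as a hedged sketch (stripAllocated and the package are hypothetical; v1 is k8s.io/api/core/v1):

package example // hypothetical

import v1 "k8s.io/api/core/v1"

// stripAllocated returns copies of the given statuses with AllocatedResources
// cleared, i.e. what the API reports when the gate is disabled.
func stripAllocated(statuses []v1.ContainerStatus) []v1.ContainerStatus {
	out := make([]v1.ContainerStatus, 0, len(statuses))
	for _, s := range statuses {
		s := *s.DeepCopy()
		s.AllocatedResources = nil
		out = append(out, s)
	}
	return out
}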