Merge pull request #129731 from gjkim42/promote-sidecar-containers-to-ga
Promote SidecarContainers feature to GA
This commit is contained in: commit fab0d76574
@@ -33,6 +33,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/apimachinery/pkg/util/version"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	api "k8s.io/kubernetes/pkg/apis/core"
@@ -2908,7 +2909,11 @@ func TestDropSidecarContainers(t *testing.T) {
 			}
 
 			t.Run(fmt.Sprintf("feature enabled=%v, old pod %v, new pod %v", enabled, oldPodInfo.description, newPodInfo.description), func(t *testing.T) {
-				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, enabled)
+				if !enabled {
+					// TODO: Remove this in v1.36
+					featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, version.MustParse("1.32"))
+					featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, false)
+				}
 
 				var oldPodSpec *api.PodSpec
 				if oldPod != nil {
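For readers of the hunk above: once a feature gate is GA and locked to its default, a test can only exercise the disabled path by first emulating an older release. A minimal sketch of that pattern, using only the helpers already imported above (the function name setSidecarGate is hypothetical):

func setSidecarGate(t *testing.T, enabled bool) {
	if !enabled {
		// Emulate v1.32, the last minor version before the gate was locked,
		// so overriding the gate to false is still allowed.
		featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, version.MustParse("1.32"))
		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, false)
	}
	// Nothing to do when enabled: the GA default is already true.
}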
@@ -25611,8 +25611,6 @@ func TestValidateSELinuxChangePolicy(t *testing.T) {
 }
 
 func TestValidatePodResize(t *testing.T) {
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
-
 	mkPod := func(req, lim core.ResourceList, tweaks ...podtest.Tweak) *core.Pod {
 		return podtest.MakePod("pod", append(tweaks,
 			podtest.SetContainers(
@@ -23,10 +23,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/klog/v2/ktesting"
-	"k8s.io/kubernetes/pkg/features"
 	clocktesting "k8s.io/utils/clock/testing"
 	"k8s.io/utils/ptr"
 )
@@ -203,7 +200,6 @@ func TestNewBackoffRecord(t *testing.T) {
 }
 
 func TestGetFinishedTime(t *testing.T) {
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	defaultTestTime := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
 	defaultTestTimeMinus30s := defaultTestTime.Add(-30 * time.Second)
 	containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
@@ -713,6 +713,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate
 	SidecarContainers: {
 		{Version: version.MustParse("1.28"), Default: false, PreRelease: featuregate.Alpha},
 		{Version: version.MustParse("1.29"), Default: true, PreRelease: featuregate.Beta},
+		{Version: version.MustParse("1.33"), Default: true, LockToDefault: true, PreRelease: featuregate.GA}, // GA in 1.33 remove in 1.36
 	},
 
 	SizeMemoryBackedVolumes: {
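The LockToDefault entry added above means the gate can no longer be turned off from v1.33 onward. A rough sketch of the consequence, assuming standard component-base featuregate semantics (the error wording is illustrative, not quoted from the library):

// At a native or emulated version >= 1.33, overriding the locked gate fails:
if err := utilfeature.DefaultMutableFeatureGate.Set("SidecarContainers=false"); err != nil {
	// expected: an error saying the feature is locked to its default (true)
	t.Logf("override rejected as expected: %v", err)
}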
@@ -66,16 +66,13 @@ func (p *v1PodResourcesServer) List(ctx context.Context, req *podresourcesv1.Lis
 		Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
-		pRes.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
-
-		for _, container := range pod.Spec.InitContainers {
-			if !podutil.IsRestartableInitContainer(&container) {
-				continue
-			}
-
-			pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
-		}
+	pRes.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
+	for _, container := range pod.Spec.InitContainers {
+		if !podutil.IsRestartableInitContainer(&container) {
+			continue
+		}
+
+		pRes.Containers = append(pRes.Containers, p.getContainerResources(pod, &container))
 	}
 
 	for _, container := range pod.Spec.Containers {
@@ -126,16 +123,13 @@ func (p *v1PodResourcesServer) Get(ctx context.Context, req *podresourcesv1.GetP
 		Containers: make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.Containers)),
 	}
 
-	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.SidecarContainers) {
-		podResources.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
-
-		for _, container := range pod.Spec.InitContainers {
-			if !podutil.IsRestartableInitContainer(&container) {
-				continue
-			}
-
-			podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
-		}
+	podResources.Containers = make([]*podresourcesv1.ContainerResources, 0, len(pod.Spec.InitContainers)+len(pod.Spec.Containers))
+	for _, container := range pod.Spec.InitContainers {
+		if !podutil.IsRestartableInitContainer(&container) {
+			continue
+		}
+
+		podResources.Containers = append(podResources.Containers, p.getContainerResources(pod, &container))
 	}
 
 	for _, container := range pod.Spec.Containers {
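Both server hunks above now walk pod.Spec.InitContainers unconditionally and keep only restartable ones. For reference, a sketch of what podutil.IsRestartableInitContainer checks; the body below is an assumption based on the sidecar definition, not code from this diff:

// A restartable init container ("sidecar") is an init container whose
// restartPolicy is explicitly Always.
func IsRestartableInitContainer(initContainer *v1.Container) bool {
	if initContainer == nil || initContainer.RestartPolicy == nil {
		return false
	}
	return *initContainer.RestartPolicy == v1.ContainerRestartPolicyAlways
}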
@@ -298,8 +298,7 @@ func TestListPodResourcesWithInitContainersV1(t *testing.T) {
 			*podresourcetest.MockCPUsProvider,
 			*podresourcetest.MockMemoryProvider,
 			*podresourcetest.MockDynamicResourcesProvider)
-		sidecarContainersEnabled bool
-		expectedResponse         *podresourcesapi.ListPodResourcesResponse
+		expectedResponse *podresourcesapi.ListPodResourcesResponse
 	}{
 		{
 			desc: "pod having an init container",
@@ -352,110 +351,7 @@ func TestListPodResourcesWithInitContainersV1(t *testing.T) {
 				},
 			},
-		{
-			desc: "pod having an init container with SidecarContainers enabled",
-			pods: []*v1.Pod{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      podName,
-						Namespace: podNamespace,
-						UID:       podUID,
-					},
-					Spec: v1.PodSpec{
-						InitContainers: []v1.Container{
-							{
-								Name: initContainerName,
-							},
-						},
-						Containers: containers,
-					},
-				},
-			},
-			mockFunc: func(
-				pods []*v1.Pod,
-				devicesProvider *podresourcetest.MockDevicesProvider,
-				cpusProvider *podresourcetest.MockCPUsProvider,
-				memoryProvider *podresourcetest.MockMemoryProvider,
-				dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
-				devicesProvider.EXPECT().UpdateAllocatedDevices().Return().Maybe()
-				devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).Maybe()
-				cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).Maybe()
-				memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).Maybe()
-				dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).Maybe()
-
-			},
-			sidecarContainersEnabled: true,
-			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
-				PodResources: []*podresourcesapi.PodResources{
-					{
-						Name:      podName,
-						Namespace: podNamespace,
-						Containers: []*podresourcesapi.ContainerResources{
-							{
-								Name:             containerName,
-								Devices:          devs,
-								CpuIds:           cpus,
-								Memory:           memory,
-								DynamicResources: []*podresourcesapi.DynamicResource{},
-							},
-						},
-					},
-				},
-			},
-		},
-		{
-			desc: "pod having a restartable init container with SidecarContainers disabled",
-			pods: []*v1.Pod{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name:      podName,
-						Namespace: podNamespace,
-						UID:       podUID,
-					},
-					Spec: v1.PodSpec{
-						InitContainers: []v1.Container{
-							{
-								Name:          initContainerName,
-								RestartPolicy: &containerRestartPolicyAlways,
-							},
-						},
-						Containers: containers,
-					},
-				},
-			},
-			mockFunc: func(
-				pods []*v1.Pod,
-				devicesProvider *podresourcetest.MockDevicesProvider,
-				cpusProvider *podresourcetest.MockCPUsProvider,
-				memoryProvider *podresourcetest.MockMemoryProvider,
-				dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
-				devicesProvider.EXPECT().UpdateAllocatedDevices().Return().Maybe()
-
-				devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).Maybe()
-				cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).Maybe()
-				memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).Maybe()
-				dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).Maybe()
-
-			},
-			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
-				PodResources: []*podresourcesapi.PodResources{
-					{
-						Name:      podName,
-						Namespace: podNamespace,
-						Containers: []*podresourcesapi.ContainerResources{
-							{
-								Name:             containerName,
-								Devices:          devs,
-								CpuIds:           cpus,
-								Memory:           memory,
-								DynamicResources: []*podresourcesapi.DynamicResource{},
-							},
-						},
-					},
-				},
-			},
-		},
 		{
-			desc: "pod having an init container with SidecarContainers enabled",
+			desc: "pod having a restartable init container",
 			pods: []*v1.Pod{
 				{
 					ObjectMeta: metav1.ObjectMeta{
@@ -493,7 +389,6 @@ func TestListPodResourcesWithInitContainersV1(t *testing.T) {
 				dynamicResourcesProvider.EXPECT().GetDynamicResources(pods[0], &pods[0].Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).Maybe()
 
 			},
-			sidecarContainersEnabled: true,
 			expectedResponse: &podresourcesapi.ListPodResourcesResponse{
 				PodResources: []*podresourcesapi.PodResources{
 					{
@@ -521,8 +416,6 @@ func TestListPodResourcesWithInitContainersV1(t *testing.T) {
 		},
 	} {
 		t.Run(tc.desc, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.SidecarContainers, tc.sidecarContainersEnabled)
-
 			mockDevicesProvider := podresourcetest.NewMockDevicesProvider(t)
 			mockPodsProvider := podresourcetest.NewMockPodsProvider(t)
 			mockCPUsProvider := podresourcetest.NewMockCPUsProvider(t)
@@ -1069,8 +962,7 @@ func TestGetPodResourcesWithInitContainersV1(t *testing.T) {
 			*podresourcetest.MockCPUsProvider,
 			*podresourcetest.MockMemoryProvider,
 			*podresourcetest.MockDynamicResourcesProvider)
-		sidecarContainersEnabled bool
-		expectedResponse         *podresourcesapi.GetPodResourcesResponse
+		expectedResponse *podresourcesapi.GetPodResourcesResponse
 	}{
 		{
 			desc: "pod having an init container",
@@ -1119,102 +1011,7 @@ func TestGetPodResourcesWithInitContainersV1(t *testing.T) {
 				},
 			},
-		{
-			desc: "pod having an init container with SidecarContainers enabled",
-			pod: &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      podName,
-					Namespace: podNamespace,
-					UID:       podUID,
-				},
-				Spec: v1.PodSpec{
-					InitContainers: []v1.Container{
-						{
-							Name: initContainerName,
-						},
-					},
-					Containers: containers,
-				},
-			},
-			mockFunc: func(
-				pod *v1.Pod,
-				devicesProvider *podresourcetest.MockDevicesProvider,
-				cpusProvider *podresourcetest.MockCPUsProvider,
-				memoryProvider *podresourcetest.MockMemoryProvider,
-				dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
-				devicesProvider.EXPECT().UpdateAllocatedDevices().Return().Maybe()
-				devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).Maybe()
-				cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).Maybe()
-				memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).Maybe()
-				dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).Maybe()
-
-			},
-			sidecarContainersEnabled: true,
-			expectedResponse: &podresourcesapi.GetPodResourcesResponse{
-				PodResources: &podresourcesapi.PodResources{
-					Name:      podName,
-					Namespace: podNamespace,
-					Containers: []*podresourcesapi.ContainerResources{
-						{
-							Name:             containerName,
-							Devices:          devs,
-							CpuIds:           cpus,
-							Memory:           memory,
-							DynamicResources: []*podresourcesapi.DynamicResource{},
-						},
-					},
-				},
-			},
-		},
-		{
-			desc: "pod having a restartable init container with SidecarContainers disabled",
-			pod: &v1.Pod{
-				ObjectMeta: metav1.ObjectMeta{
-					Name:      podName,
-					Namespace: podNamespace,
-					UID:       podUID,
-				},
-				Spec: v1.PodSpec{
-					InitContainers: []v1.Container{
-						{
-							Name:          initContainerName,
-							RestartPolicy: &containerRestartPolicyAlways,
-						},
-					},
-					Containers: containers,
-				},
-			},
-			mockFunc: func(
-				pod *v1.Pod,
-				devicesProvider *podresourcetest.MockDevicesProvider,
-				cpusProvider *podresourcetest.MockCPUsProvider,
-				memoryProvider *podresourcetest.MockMemoryProvider,
-				dynamicResourcesProvider *podresourcetest.MockDynamicResourcesProvider) {
-				devicesProvider.EXPECT().UpdateAllocatedDevices().Return().Maybe()
-
-				devicesProvider.EXPECT().GetDevices(string(podUID), containerName).Return(devs).Maybe()
-				cpusProvider.EXPECT().GetCPUs(string(podUID), containerName).Return(cpus).Maybe()
-				memoryProvider.EXPECT().GetMemory(string(podUID), containerName).Return(memory).Maybe()
-				dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).Maybe()
-
-			},
-			expectedResponse: &podresourcesapi.GetPodResourcesResponse{
-				PodResources: &podresourcesapi.PodResources{
-					Name:      podName,
-					Namespace: podNamespace,
-					Containers: []*podresourcesapi.ContainerResources{
-						{
-							Name:             containerName,
-							Devices:          devs,
-							CpuIds:           cpus,
-							Memory:           memory,
-							DynamicResources: []*podresourcesapi.DynamicResource{},
-						},
-					},
-				},
-			},
-		},
 		{
-			desc: "pod having an init container with SidecarContainers enabled",
+			desc: "pod having a restartable init container",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: podName,
@@ -1250,7 +1047,6 @@ func TestGetPodResourcesWithInitContainersV1(t *testing.T) {
 				dynamicResourcesProvider.EXPECT().GetDynamicResources(pod, &pod.Spec.Containers[0]).Return([]*podresourcesapi.DynamicResource{}).Maybe()
 
 			},
-			sidecarContainersEnabled: true,
 			expectedResponse: &podresourcesapi.GetPodResourcesResponse{
 				PodResources: &podresourcesapi.PodResources{
 					Name: podName,
@@ -1276,8 +1072,6 @@ func TestGetPodResourcesWithInitContainersV1(t *testing.T) {
 		},
 	} {
 		t.Run(tc.desc, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, pkgfeatures.SidecarContainers, tc.sidecarContainersEnabled)
-
 			mockDevicesProvider := podresourcetest.NewMockDevicesProvider(t)
 			mockPodsProvider := podresourcetest.NewMockPodsProvider(t)
 			mockCPUsProvider := podresourcetest.NewMockCPUsProvider(t)
@@ -1625,40 +1625,38 @@ func getPhase(pod *v1.Pod, info []v1.ContainerStatus, podIsTerminal bool) v1.Pod
 	stopped := 0
 	succeeded := 0
 
-	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
-		// restartable init containers
-		for _, container := range spec.InitContainers {
-			if !podutil.IsRestartableInitContainer(&container) {
-				// Skip the regular init containers, as they have been handled above.
-				continue
-			}
-			containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
-			if !ok {
-				unknown++
-				continue
-			}
-
-			switch {
-			case containerStatus.State.Running != nil:
-				if containerStatus.Started == nil || !*containerStatus.Started {
-					pendingRestartableInitContainers++
-				}
-				running++
-			case containerStatus.State.Terminated != nil:
-				// Do nothing here, as terminated restartable init containers are not
-				// taken into account for the pod phase.
-			case containerStatus.State.Waiting != nil:
-				if containerStatus.LastTerminationState.Terminated != nil {
-					// Do nothing here, as terminated restartable init containers are not
-					// taken into account for the pod phase.
-				} else {
-					pendingRestartableInitContainers++
-					waiting++
-				}
-			default:
-				pendingRestartableInitContainers++
-				unknown++
-			}
+	// restartable init containers
+	for _, container := range spec.InitContainers {
+		if !podutil.IsRestartableInitContainer(&container) {
+			// Skip the regular init containers, as they have been handled above.
+			continue
+		}
+		containerStatus, ok := podutil.GetContainerStatus(info, container.Name)
+		if !ok {
+			unknown++
+			continue
+		}
+
+		switch {
+		case containerStatus.State.Running != nil:
+			if containerStatus.Started == nil || !*containerStatus.Started {
+				pendingRestartableInitContainers++
+			}
+			running++
+		case containerStatus.State.Terminated != nil:
+			// Do nothing here, as terminated restartable init containers are not
+			// taken into account for the pod phase.
+		case containerStatus.State.Waiting != nil:
+			if containerStatus.LastTerminationState.Terminated != nil {
+				// Do nothing here, as terminated restartable init containers are not
+				// taken into account for the pod phase.
+			} else {
+				pendingRestartableInitContainers++
+				waiting++
+			}
+		default:
+			pendingRestartableInitContainers++
+			unknown++
 		}
 	}
 
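A worked example of the counting above, with a hypothetical container status: a sidecar that is running but whose startup probe has not yet succeeded increments both pendingRestartableInitContainers and running, which keeps the pod out of the Running phase.

status := v1.ContainerStatus{
	Name:    "log-shipper", // hypothetical sidecar name
	State:   v1.ContainerState{Running: &v1.ContainerStateRunning{}},
	Started: ptr.To(false), // startup probe has not passed yet
}
// In getPhase: Started is false, so pendingRestartableInitContainers++
// and running++; initialization is not yet considered complete.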
@@ -2619,7 +2619,6 @@ func TestPodPhaseWithRestartAlwaysRestartableInitContainers(t *testing.T) {
 			"all regular containers succeeded and restartable init container succeeded with restart always, but the pod is terminal",
 		},
 	}
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	for _, test := range tests {
 		statusInfo := test.pod.Status.InitContainerStatuses
 		statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
@@ -2711,7 +2710,6 @@ func TestPodPhaseWithRestartAlwaysAndPodHasRun(t *testing.T) {
 			"regular init container is succeeded, restartable init container and regular containers are both running",
 		},
 	}
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	for _, test := range tests {
 		statusInfo := test.pod.Status.InitContainerStatuses
 		statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
@@ -3115,7 +3113,6 @@ func TestPodPhaseWithRestartNeverRestartableInitContainers(t *testing.T) {
 			"backoff crashloop with non-zero restartable init container, main containers succeeded",
 		},
 	}
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	for _, test := range tests {
 		statusInfo := test.pod.Status.InitContainerStatuses
 		statusInfo = append(statusInfo, test.pod.Status.ContainerStatuses...)
@@ -832,8 +832,7 @@ func (m *kubeGenericRuntimeManager) killContainersWithSyncResult(ctx context.Con
 
 	wg.Add(len(runningPod.Containers))
 	var termOrdering *terminationOrdering
-	// we only care about container termination ordering if the sidecars feature is enabled
-	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && types.HasRestartableInitContainer(pod) {
+	if types.HasRestartableInitContainer(pod) {
 		var runningContainerNames []string
 		for _, container := range runningPod.Containers {
 			runningContainerNames = append(runningContainerNames, container.Name)
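With the gate removed, termination ordering is computed for any pod that declares a restartable init container. The underlying idea, as a self-contained sketch (names are hypothetical; this is not the terminationOrdering implementation):

// Sidecars should outlive the containers they support: main containers
// are stopped first, then sidecars in reverse start order.
func stopOrder(mainContainers, sidecars []string) []string {
	order := append([]string{}, mainContainers...)
	for i := len(sidecars) - 1; i >= 0; i-- {
		order = append(order, sidecars[i])
	}
	return order
}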
@@ -930,6 +929,8 @@ func (m *kubeGenericRuntimeManager) purgeInitContainers(ctx context.Context, pod
 // index of next init container to start, or done if there are no further init containers.
 // Status is only returned if an init container is failed, in which case next will
 // point to the current container.
+// TODO: Remove this function as this is a subset of the
+// computeInitContainerActions.
 func findNextInitContainerToRun(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (status *kubecontainer.Status, next *v1.Container, done bool) {
 	if len(pod.Spec.InitContainers) == 0 {
 		return nil, nil, true
@@ -496,6 +496,8 @@ type podActions struct {
 	Attempt uint32
 
 	// The next init container to start.
+	// TODO: Either this or InitContainersToStart will be used. Remove this
+	// field once it is not needed.
 	NextInitContainerToStart *v1.Container
 	// InitContainersToStart keeps a list of indexes for the init containers to
 	// start, where the index is the index of the specific init container in the
@@ -903,7 +905,7 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 		ContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),
 	}
 
-	handleRestartableInitContainers := utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && types.HasRestartableInitContainer(pod)
+	handleRestartableInitContainers := types.HasRestartableInitContainer(pod)
 
 	// If we need to (re-)create the pod sandbox, everything will need to be
 	// killed and recreated, and init containers should be purged.
@@ -934,6 +936,8 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 	// is done and there is no container to start.
 	if len(containersToStart) == 0 {
 		hasInitialized := false
+		// TODO: Remove this code path as logically it is the subset of the next
+		// code path.
 		if !handleRestartableInitContainers {
 			_, _, hasInitialized = findNextInitContainerToRun(pod, podStatus)
 		} else {
@@ -952,6 +956,8 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 	// state.
 	if len(pod.Spec.InitContainers) != 0 {
 		// Pod has init containers, return the first one.
+		// TODO: Remove this code path as logically it is the subset of the next
+		// code path.
 		if !handleRestartableInitContainers {
 			changes.NextInitContainerToStart = &pod.Spec.InitContainers[0]
 		} else {
@@ -975,6 +981,8 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 	}
 
 	// Check initialization progress.
+	// TODO: Remove this code path as logically it is the subset of the next
+	// code path.
 	if !handleRestartableInitContainers {
 		initLastStatus, next, done := findNextInitContainerToRun(pod, podStatus)
 		if !done {
@@ -1095,6 +1103,8 @@ func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *
 
 	if keepCount == 0 && len(changes.ContainersToStart) == 0 {
 		changes.KillPod = true
+		// TODO: Remove this code path as logically it is the subset of the next
+		// code path.
 		if handleRestartableInitContainers {
 			// To prevent the restartable init containers to keep pod alive, we should
 			// not restart them.
@@ -1353,6 +1363,8 @@ func (m *kubeGenericRuntimeManager) SyncPod(ctx context.Context, pod *v1.Pod, po
 		start(ctx, "ephemeral container", metrics.EphemeralContainer, ephemeralContainerStartSpec(&pod.Spec.EphemeralContainers[idx]))
 	}
 
+	// TODO: Remove this code path as logically it is the subset of the next
+	// code path.
 	if !types.HasRestartableInitContainer(pod) {
 		// Step 6: start the init container.
 		if container := podContainerChanges.NextInitContainerToStart; container != nil {
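All of the computePodActions and SyncPod branches above now key off types.HasRestartableInitContainer alone. A sketch of that predicate, with the body assumed from its name and the way it is used here:

func HasRestartableInitContainer(pod *v1.Pod) bool {
	for i := range pod.Spec.InitContainers {
		c := &pod.Spec.InitContainers[i]
		if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
			return true
		}
	}
	return false
}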
@@ -1268,16 +1268,6 @@ func verifyActions(t *testing.T, expected, actual *podActions, desc string) {
 }
 
 func TestComputePodActionsWithInitContainers(t *testing.T) {
-	t.Run("sidecar containers disabled", func(t *testing.T) {
-		testComputePodActionsWithInitContainers(t, false)
-	})
-	t.Run("sidecar containers enabled", func(t *testing.T) {
-		testComputePodActionsWithInitContainers(t, true)
-	})
-}
-
-func testComputePodActionsWithInitContainers(t *testing.T, sidecarContainersEnabled bool) {
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, sidecarContainersEnabled)
 	_, _, m, err := createTestRuntimeManager()
 	require.NoError(t, err)
 
@@ -1498,7 +1488,7 @@ func testComputePodActionsWithInitContainers(t *testing.T, sidecarContainersEnab
 		}
 		ctx := context.Background()
 		actions := m.computePodActions(ctx, pod, status)
-		handleRestartableInitContainers := sidecarContainersEnabled && kubelettypes.HasRestartableInitContainer(pod)
+		handleRestartableInitContainers := kubelettypes.HasRestartableInitContainer(pod)
 		if !handleRestartableInitContainers {
 			// If sidecar containers are disabled or the pod does not have any
 			// restartable init container, we should not see any
@@ -1553,7 +1543,6 @@ func makeBasePodAndStatusWithInitContainers() (*v1.Pod, *kubecontainer.PodStatus
 }
 
 func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	_, _, m, err := createTestRuntimeManager()
 	require.NoError(t, err)
 
@@ -1973,16 +1962,6 @@ func TestComputePodActionsWithInitAndEphemeralContainers(t *testing.T) {
 	TestComputePodActions(t)
 	TestComputePodActionsWithInitContainers(t)
 
-	t.Run("sidecar containers disabled", func(t *testing.T) {
-		testComputePodActionsWithInitAndEphemeralContainers(t, false)
-	})
-	t.Run("sidecar containers enabled", func(t *testing.T) {
-		testComputePodActionsWithInitAndEphemeralContainers(t, true)
-	})
-}
-
-func testComputePodActionsWithInitAndEphemeralContainers(t *testing.T, sidecarContainersEnabled bool) {
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, sidecarContainersEnabled)
 	_, _, m, err := createTestRuntimeManager()
 	require.NoError(t, err)
 
@@ -2121,7 +2100,7 @@ func testComputePodActionsWithInitAndEphemeralContainers(t *testing.T, sidecarCo
 		}
 		ctx := context.Background()
 		actions := m.computePodActions(ctx, pod, status)
-		handleRestartableInitContainers := sidecarContainersEnabled && kubelettypes.HasRestartableInitContainer(pod)
+		handleRestartableInitContainers := kubelettypes.HasRestartableInitContainer(pod)
 		if !handleRestartableInitContainers {
 			// If sidecar containers are disabled or the pod does not have any
 			// restartable init container, we should not see any
@@ -21,12 +21,9 @@ import (
 	"runtime"
 
 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-helpers/scheduling/corev1"
 	"k8s.io/klog/v2"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/scheduler"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
@@ -139,21 +136,6 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult
 	nodeInfo := schedulerframework.NewNodeInfo(pods...)
 	nodeInfo.SetNode(node)
 
-	// TODO: Remove this after the SidecarContainers feature gate graduates to GA.
-	if !utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) {
-		for _, c := range admitPod.Spec.InitContainers {
-			if podutil.IsRestartableInitContainer(&c) {
-				message := fmt.Sprintf("Init container %q may not have a non-default restartPolicy", c.Name)
-				klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
-				return PodAdmitResult{
-					Admit:   false,
-					Reason:  InitContainerRestartPolicyForbidden,
-					Message: message,
-				}
-			}
-		}
-	}
-
 	// ensure the node has enough plugin resources for that required in pods
 	if err = w.pluginResourceUpdateFunc(nodeInfo, attrs); err != nil {
 		message := fmt.Sprintf("Update plugin resources failed due to %v, which is unexpected.", err)
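The deleted admission block above is what used to reject sidecar-style pods on kubelets running with the gate off. After GA, a pod like the following sketch is admitted unconditionally (image names are hypothetical):

always := v1.ContainerRestartPolicyAlways
pod := &v1.Pod{
	Spec: v1.PodSpec{
		InitContainers: []v1.Container{{
			Name:          "proxy-sidecar",
			Image:         "example.com/proxy:latest",
			RestartPolicy: &always, // marks the init container as a sidecar
		}},
		Containers: []v1.Container{{
			Name:  "main",
			Image: "example.com/app:latest",
		}},
	},
}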
@@ -27,9 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/probe"
@@ -136,34 +133,33 @@ func TestAddRemovePods(t *testing.T) {
 func TestAddRemovePodsWithRestartableInitContainer(t *testing.T) {
 	m := newTestManager()
 	defer cleanup(t, m)
-	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 	if err := expectProbes(m, nil); err != nil {
 		t.Error(err)
 	}
 
 	testCases := []struct {
-		desc                    string
-		probePaths              []probeKey
-		enableSidecarContainers bool
+		desc                        string
+		probePaths                  []probeKey
+		hasRestartableInitContainer bool
 	}{
 		{
-			desc:                    "pod with sidecar (no sidecar containers feature enabled)",
-			probePaths:              nil,
-			enableSidecarContainers: false,
+			desc:                        "pod without sidecar",
+			probePaths:                  nil,
+			hasRestartableInitContainer: false,
 		},
 		{
-			desc: "pod with sidecar (sidecar containers feature enabled)",
+			desc: "pod with sidecar",
 			probePaths: []probeKey{
 				{"restartable_init_container_pod", "restartable-init", liveness},
 				{"restartable_init_container_pod", "restartable-init", readiness},
 				{"restartable_init_container_pod", "restartable-init", startup},
 			},
-			enableSidecarContainers: true,
+			hasRestartableInitContainer: true,
 		},
 	}
 
-	containerRestartPolicy := func(enableSidecarContainers bool) *v1.ContainerRestartPolicy {
-		if !enableSidecarContainers {
+	containerRestartPolicy := func(hasRestartableInitContainer bool) *v1.ContainerRestartPolicy {
+		if !hasRestartableInitContainer {
 			return nil
 		}
 		restartPolicy := v1.ContainerRestartPolicyAlways
@@ -172,8 +168,6 @@ func TestAddRemovePodsWithRestartableInitContainer(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, tc.enableSidecarContainers)
-
 			probePod := v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					UID: "restartable_init_container_pod",
@@ -186,7 +180,7 @@ func TestAddRemovePodsWithRestartableInitContainer(t *testing.T) {
 					LivenessProbe:  defaultProbe,
 					ReadinessProbe: defaultProbe,
 					StartupProbe:   defaultProbe,
-					RestartPolicy:  containerRestartPolicy(tc.enableSidecarContainers),
+					RestartPolicy:  containerRestartPolicy(tc.hasRestartableInitContainer),
 				}},
 				Containers: []v1.Container{{
 					Name: "main",
@@ -453,10 +447,10 @@ func TestUpdatePodStatusWithInitContainers(t *testing.T) {
 	m.startupManager.Set(kubecontainer.ParseContainerID(started.ContainerID), results.Success, &v1.Pod{})
 
 	testCases := []struct {
-		desc                    string
-		expectedStartup         map[probeKey]bool
-		expectedReadiness       map[probeKey]bool
-		enableSidecarContainers bool
+		desc                        string
+		expectedStartup             map[probeKey]bool
+		expectedReadiness           map[probeKey]bool
+		hasRestartableInitContainer bool
 	}{
 		{
 			desc: "init containers",
@@ -470,10 +464,10 @@ func TestUpdatePodStatusWithInitContainers(t *testing.T) {
 				{testPodUID, started.Name, readiness}:    false,
 				{testPodUID, terminated.Name, readiness}: true,
 			},
-			enableSidecarContainers: false,
+			hasRestartableInitContainer: false,
 		},
 		{
-			desc: "init container with SidecarContainers feature",
+			desc: "init container with Always restartPolicy",
 			expectedStartup: map[probeKey]bool{
 				{testPodUID, notStarted.Name, startup}: false,
 				{testPodUID, started.Name, startup}:    true,
@@ -484,7 +478,7 @@ func TestUpdatePodStatusWithInitContainers(t *testing.T) {
 				{testPodUID, started.Name, readiness}:    true,
 				{testPodUID, terminated.Name, readiness}: false,
 			},
-			enableSidecarContainers: true,
+			hasRestartableInitContainer: true,
 		},
 	}
 
@@ -498,7 +492,6 @@ func TestUpdatePodStatusWithInitContainers(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, tc.enableSidecarContainers)
 			podStatus := v1.PodStatus{
 				Phase: v1.PodRunning,
 				InitContainerStatuses: []v1.ContainerStatus{
@@ -514,15 +507,15 @@ func TestUpdatePodStatusWithInitContainers(t *testing.T) {
 					InitContainers: []v1.Container{
 						{
 							Name:          notStarted.Name,
-							RestartPolicy: containerRestartPolicy(tc.enableSidecarContainers),
+							RestartPolicy: containerRestartPolicy(tc.hasRestartableInitContainer),
 						},
 						{
 							Name:          started.Name,
-							RestartPolicy: containerRestartPolicy(tc.enableSidecarContainers),
+							RestartPolicy: containerRestartPolicy(tc.hasRestartableInitContainer),
 						},
 						{
 							Name:          terminated.Name,
-							RestartPolicy: containerRestartPolicy(tc.enableSidecarContainers),
+							RestartPolicy: containerRestartPolicy(tc.hasRestartableInitContainer),
 						},
 					},
 				},
@@ -86,7 +86,7 @@ func GetContainerOOMScoreAdjust(pod *v1.Pod, container *v1.Container, memoryCapa
 
 	// adapt the sidecarContainer memoryRequest for OOM ADJ calculation
 	// calculate the oom score adjustment based on: max-memory( currentSideCarContainer , min-memory(regular containers) ) .
-	if utilfeature.DefaultFeatureGate.Enabled(features.SidecarContainers) && isSidecarContainer(pod, container) {
+	if isSidecarContainer(pod, container) {
 		// check min memory quantity in regular containers
 		minMemoryRequest := minRegularContainerMemory(*pod)
 
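A small worked example of the max(sidecar request, min regular-container request) rule described in the comment above (the values are arbitrary):

sidecarReq := int64(64 << 20)  // sidecar requests 64Mi
minRegular := int64(512 << 20) // smallest regular-container request is 512Mi
effective := sidecarReq
if minRegular > effective {
	// 512Mi is fed into the oom_score_adj formula, so a small sidecar is
	// not preferentially OOM-killed ahead of the main workload.
	effective = minRegular
}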
@@ -653,7 +653,6 @@ type oomTest struct {
 	pod                             *v1.Pod
 	memoryCapacity                  int64
 	lowHighOOMScoreAdj              map[string]lowHighOOMScoreAdjTest // [container-name] : min and max oom_score_adj score the container should be assigned.
-	sidecarContainersFeatureEnabled bool
 	podLevelResourcesFeatureEnabled bool
 }
 
@@ -728,7 +727,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 			lowHighOOMScoreAdj: map[string]lowHighOOMScoreAdjTest{
 				"burstable-unique-container": {lowOOMScoreAdj: 875, highOOMScoreAdj: 880},
 			},
-			sidecarContainersFeatureEnabled: true,
 		},
 		"burstable-mixed-unique-main-container-pod": {
 			pod: &burstableMixedUniqueMainContainerPod,
@@ -737,7 +735,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 				"init-container": {lowOOMScoreAdj: 875, highOOMScoreAdj: 880},
 				"main-1":         {lowOOMScoreAdj: 875, highOOMScoreAdj: 880},
 			},
-			sidecarContainersFeatureEnabled: true,
 		},
 		"burstable-mixed-multi-container-small-sidecar-pod": {
 			pod: &burstableMixedMultiContainerSmallSidecarPod,
@@ -747,7 +744,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 				"sidecar-small-container": {lowOOMScoreAdj: 875, highOOMScoreAdj: 875},
 				"main-1":                  {lowOOMScoreAdj: 875, highOOMScoreAdj: 875},
 			},
-			sidecarContainersFeatureEnabled: true,
 		},
 		"burstable-mixed-multi-container-sample-request-pod": {
 			pod: &burstableMixedMultiContainerSameRequestPod,
@@ -757,7 +753,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 				"sidecar-container": {lowOOMScoreAdj: 875, highOOMScoreAdj: 875},
 				"main-1":            {lowOOMScoreAdj: 875, highOOMScoreAdj: 875},
 			},
-			sidecarContainersFeatureEnabled: true,
 		},
 		"burstable-mixed-multi-container-big-sidecar-container-pod": {
 			pod: &burstableMixedMultiContainerBigSidecarContainerPod,
@@ -767,7 +762,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 				"sidecar-big-container": {lowOOMScoreAdj: 500, highOOMScoreAdj: 500},
 				"main-1":                {lowOOMScoreAdj: 875, highOOMScoreAdj: 875},
 			},
-			sidecarContainersFeatureEnabled: true,
 		},
 		"guaranteed-pod-resources-no-container-resources": {
 			pod: &guaranteedPodResourcesNoContainerResources,
@@ -831,7 +825,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 			},
 			memoryCapacity:                  4000000000,
 			podLevelResourcesFeatureEnabled: true,
-			sidecarContainersFeatureEnabled: true,
 		},
 		"burstable-pod-resources-equal-container-requests-with-sidecar": {
 			pod: &burstablePodResourcesEqualContainerRequestsWithSidecar,
@@ -841,7 +834,6 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 			},
 			memoryCapacity:                  4000000000,
 			podLevelResourcesFeatureEnabled: true,
-			sidecarContainersFeatureEnabled: true,
 		},
 		"burstable-pod-resources-unequal-container-requests-with-sidecar": {
 			pod: &burstablePodResourcesUnequalContainerRequestsWithSidecar,
@@ -852,12 +844,10 @@ func TestGetContainerOOMScoreAdjust(t *testing.T) {
 			},
 			memoryCapacity:                  4000000000,
 			podLevelResourcesFeatureEnabled: true,
-			sidecarContainersFeatureEnabled: true,
 		},
 	}
 	for name, test := range oomTests {
 		t.Run(name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, test.sidecarContainersFeatureEnabled)
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodLevelResources, test.podLevelResourcesFeatureEnabled)
 			listContainers := test.pod.Spec.InitContainers
 			listContainers = append(listContainers, test.pod.Spec.Containers...)
@@ -1238,6 +1238,10 @@
     lockToDefault: false
     preRelease: Beta
     version: "1.29"
+  - default: true
+    lockToDefault: true
+    preRelease: GA
+    version: "1.33"
 - name: SizeMemoryBackedVolumes
   versionedSpecs:
   - default: false
@@ -258,7 +258,6 @@ func TestNodeResourcesScoring(t *testing.T) {
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
 			testCtx := initTestSchedulerForNodeResourcesTest(t, tt.strategy)
 
 			for _, n := range tt.nodes {