mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 22:17:14 +00:00)

commit d02401dea9 (parent d7774fce9a)

    start setting pod metadata.generation
@@ -202,6 +202,12 @@ func SetLabels(annos map[string]string) Tweak {
 	}
 }
 
+func SetGeneration(gen int64) Tweak {
+	return func(pod *api.Pod) {
+		pod.Generation = gen
+	}
+}
+
 func SetSchedulingGates(gates ...api.PodSchedulingGate) Tweak {
 	return func(pod *api.Pod) {
 		pod.Spec.SchedulingGates = gates
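Background for readers of the test hunks below: a podtest.Tweak is a functional option applied to a pod under construction, which is what lets tests write podtest.MakePod("test-pod", podtest.SetGeneration(1)). A minimal, self-contained sketch of the pattern; the Pod struct and MakePod here are simplified stand-ins for the real podtest API:

    package main

    import "fmt"

    // Pod stands in for api.Pod; only Generation matters for this sketch.
    type Pod struct {
    	Name       string
    	Generation int64
    }

    // Tweak mirrors podtest.Tweak: a mutation applied to a pod under construction.
    type Tweak func(*Pod)

    // SetGeneration matches the helper added in the hunk above.
    func SetGeneration(gen int64) Tweak {
    	return func(pod *Pod) { pod.Generation = gen }
    }

    // MakePod applies tweaks in order, as podtest.MakePod does.
    func MakePod(name string, tweaks ...Tweak) *Pod {
    	pod := &Pod{Name: name}
    	for _, tweak := range tweaks {
    		tweak(pod)
    	}
    	return pod
    }

    func main() {
    	pod := MakePod("test-pod", SetGeneration(2))
    	fmt.Println(pod.Generation) // 2
    }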
@@ -5581,7 +5581,7 @@ func ValidatePodEphemeralContainersUpdate(newPod, oldPod *core.Pod, opts PodVali
 func ValidatePodResize(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList {
 	// Part 1: Validate newPod's spec and updates to metadata
 	fldPath := field.NewPath("metadata")
-	allErrs := ValidateImmutableField(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
+	allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath)
 	allErrs = append(allErrs, validatePodMetadataAndSpec(newPod, opts)...)
 
 	// pods with pod-level resources cannot be resized
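Why the swap matters: ValidateImmutableField fails the update whenever the two values differ at all, so once this commit starts bumping metadata.generation on resize, treating the whole ObjectMeta as immutable would reject every resize. ValidateObjectMetaUpdate applies the standard ObjectMeta update rules instead. A rough sketch of the old check's behavior (not the exact upstream code):

    import (
    	apiequality "k8s.io/apimachinery/pkg/api/equality"
    	"k8s.io/apimachinery/pkg/util/validation/field"
    )

    // Sketch of the replaced check: any difference at all is an error.
    func validateImmutableFieldSketch(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList {
    	allErrs := field.ErrorList{}
    	if !apiequality.Semantic.DeepEqual(oldVal, newVal) {
    		allErrs = append(allErrs, field.Invalid(fldPath, newVal, "field is immutable"))
    	}
    	return allErrs
    }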
@@ -86,6 +86,7 @@ func (podStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
 // PrepareForCreate clears fields that are not allowed to be set by end users on creation.
 func (podStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
 	pod := obj.(*api.Pod)
+	pod.Generation = 1
 	pod.Status = api.PodStatus{
 		Phase:    api.PodPending,
 		QOSClass: qos.GetPodQOS(pod),
@@ -104,6 +105,7 @@ func (podStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object
 	oldPod := old.(*api.Pod)
 	newPod.Status = oldPod.Status
 	podutil.DropDisabledPodFields(newPod, oldPod)
+	updatePodGeneration(newPod, oldPod)
 }
 
 // Validate validates a new pod.
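updatePodGeneration, defined near the end of this diff, is wired into every strategy whose PrepareForUpdate can change the spec: the main pod strategy here plus the ephemeralcontainers and resize subresource strategies below. PrepareForUpdate is the rest.RESTUpdateStrategy hook the generic registry runs before validation, so the bump happens before the object is validated or persisted. A simplified sketch of that ordering (the real flow lives in the generic registry store):

    import (
    	"context"

    	"k8s.io/apimachinery/pkg/runtime"
    	"k8s.io/apimachinery/pkg/util/validation/field"
    	"k8s.io/apiserver/pkg/registry/rest"
    )

    // Simplified update flow: normalize first, then validate, then persist.
    func updateFlowSketch(ctx context.Context, strategy rest.RESTUpdateStrategy, newObj, oldObj runtime.Object) field.ErrorList {
    	// PrepareForUpdate copies status from old, drops disabled fields,
    	// and (after this commit) bumps generation on spec changes.
    	strategy.PrepareForUpdate(ctx, newObj, oldObj)
    	// Validation then sees the already-normalized object.
    	return strategy.ValidateUpdate(ctx, newObj, oldObj)
    }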
@@ -260,6 +262,7 @@ func (podEphemeralContainersStrategy) PrepareForUpdate(ctx context.Context, obj,
 
 	*newPod = *dropNonEphemeralContainerUpdates(newPod, oldPod)
 	podutil.DropDisabledPodFields(newPod, oldPod)
+	updatePodGeneration(newPod, oldPod)
 }
 
 func (podEphemeralContainersStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
@@ -334,6 +337,7 @@ func (podResizeStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
 	*newPod = *dropNonResizeUpdates(newPod, oldPod)
 	podutil.MarkPodProposedForResize(oldPod, newPod)
 	podutil.DropDisabledPodFields(newPod, oldPod)
+	updatePodGeneration(newPod, oldPod)
 }
 
 func (podResizeStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
@@ -975,3 +979,11 @@ func apparmorFieldForAnnotation(annotation string) *api.AppArmorProfile {
 	// length or if the annotation has an unrecognized value
 	return nil
 }
+
+// updatePodGeneration bumps metadata.generation if needed for any updates
+// to the podspec.
+func updatePodGeneration(newPod, oldPod *api.Pod) {
+	if !apiequality.Semantic.DeepEqual(newPod.Spec, oldPod.Spec) {
+		newPod.Generation++
+	}
+}
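The helper compares specs with apiequality.Semantic.DeepEqual rather than reflect.DeepEqual, so zero-valued and empty-but-non-nil fields compare equal; the "spec semantically equal" unit test added below depends on exactly this. A small sketch of the distinction, using the same import paths this diff uses:

    package main

    import (
    	"fmt"
    	"reflect"

    	apiequality "k8s.io/apimachinery/pkg/api/equality"
    	api "k8s.io/kubernetes/pkg/apis/core"
    )

    func main() {
    	oldSpec := api.PodSpec{Tolerations: []api.Toleration{}} // empty, non-nil slice
    	newSpec := api.PodSpec{Containers: []api.Container{}}   // different field, also empty
    	// Semantic equality treats both specs as equal, so no generation bump,
    	// as the "spec semantically equal" test case below asserts.
    	fmt.Println(apiequality.Semantic.DeepEqual(newSpec, oldSpec)) // true
    	// Strict reflection-based equality would call them different.
    	fmt.Println(reflect.DeepEqual(newSpec, oldSpec)) // false
    }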
@@ -942,6 +942,7 @@ func TestEphemeralContainerStrategyValidateUpdate(t *testing.T) {
 			name: "add ephemeral container to regular pod and expect success",
 			oldPod: podtest.MakePod("test-pod",
 				podtest.SetResourceVersion("1"),
+				podtest.SetGeneration(1),
 			),
 			newPod: podtest.MakePod("test-pod",
 				podtest.SetResourceVersion("1"),
@@ -953,6 +954,7 @@ func TestEphemeralContainerStrategyValidateUpdate(t *testing.T) {
 					TerminationMessagePolicy: "File",
 				},
 			}),
+			podtest.SetGeneration(2),
 		),
 	},
 }
@@ -1338,7 +1340,7 @@ func TestNodeInclusionPolicyEnablementInCreating(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeInclusionPolicyInPodTopologySpread, tc.enableNodeInclusionPolicy)
 
-			pod := podtest.MakePod("foo")
+			pod := podtest.MakePod("foo", podtest.SetGeneration(1))
 			wantPod := pod.DeepCopy()
 			pod.Spec.TopologySpreadConstraints = append(pod.Spec.TopologySpreadConstraints, tc.topologySpreadConstraints...)
 
@@ -2470,6 +2472,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2489,6 +2492,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2508,6 +2512,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2530,6 +2535,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2552,6 +2558,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				api.ContainerResizePolicy{ResourceName: "memory", RestartPolicy: "RestartContainer"},
 			),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2574,6 +2581,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				api.ContainerResizePolicy{ResourceName: "memory", RestartPolicy: "RestartContainer"},
 			),
 		)),
+		podtest.SetGeneration(2),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2595,6 +2603,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2623,6 +2632,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 			api.ResourceList{
@@ -2641,6 +2651,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2662,6 +2673,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(1),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 				api.ResourceList{
@@ -2690,6 +2702,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
 			api.ResourceList{
@@ -2708,6 +2721,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 				},
 			}),
 		)),
+		podtest.SetGeneration(2),
 		podtest.SetStatus(podtest.MakePodStatus(
 			podtest.SetResizeStatus(api.PodResizeStatusProposed), // Resize status set
 			podtest.SetContainerStatuses(podtest.MakeContainerStatus("container1",
@@ -2740,6 +2754,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(
 			podtest.MakeContainerStatus("container1",
@@ -2775,6 +2790,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(
 			podtest.MakeContainerStatus("container1",
@@ -2810,6 +2826,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(2),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetResizeStatus(api.PodResizeStatusProposed), // Resize status set
 		podtest.SetContainerStatuses(
@@ -2842,6 +2859,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(
 			podtest.MakeContainerStatus("container1",
@@ -2865,6 +2883,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(
 			podtest.MakeContainerStatus("container1",
@@ -2888,6 +2907,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(
 			podtest.MakeContainerStatus("container1",
@@ -2914,6 +2934,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(
 			podtest.MakeContainerStatus("init-container1",
@@ -2937,6 +2958,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetContainerStatuses(
 			podtest.MakeContainerStatus("init-container1",
@@ -2960,6 +2982,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(2),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetResizeStatus(api.PodResizeStatusProposed), // Resize status set
 		podtest.SetContainerStatuses(
@@ -3030,6 +3053,7 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 			}),
 		),
 	),
+	podtest.SetGeneration(1),
 	podtest.SetStatus(podtest.MakePodStatus(
 		podtest.SetResizeStatus(""), // Resize status not set
 		podtest.SetContainerStatuses(
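All of the expected pods above encode one invariant: SetGeneration(2) appears exactly where the resize changed the pod spec (those cases also set PodResizeStatusProposed), and SetGeneration(1) where the update was a no-op. A condensed restatement of that rule, assuming the updatePodGeneration helper from earlier in this diff; the function name here is illustrative:

    // expectedGeneration restates the rule the table-driven cases encode:
    // the generation bumps iff the resize actually changed the spec.
    func expectedGeneration(oldPod, newPod *api.Pod) int64 {
    	gen := oldPod.Generation
    	if !apiequality.Semantic.DeepEqual(newPod.Spec, oldPod.Spec) {
    		gen++ // mirrors updatePodGeneration
    	}
    	return gen
    }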
@@ -3057,3 +3081,229 @@ func TestPodResizePrepareForUpdate(t *testing.T) {
 		})
 	}
 }
+
+func TestPodGenerationPrepareForCreate(t *testing.T) {
+	testCases := []struct {
+		pod                *api.Pod
+		expectedGeneration int64
+	}{
+		{
+			pod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "gen-not-set",
+				},
+			},
+			expectedGeneration: 1,
+		},
+		{
+			pod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "gen-custom-set",
+					Generation: 5,
+				},
+			},
+			expectedGeneration: 1,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.pod.Name, func(t *testing.T) {
+			Strategy.PrepareForCreate(genericapirequest.NewContext(), tc.pod)
+			actual := tc.pod.Generation
+			if actual != tc.expectedGeneration {
+				t.Errorf("invalid generation for pod %s, expected: %d, actual: %d", tc.pod.Name, tc.expectedGeneration, actual)
+			}
+		})
+	}
+}
+
+func TestPodGenerationPrepareForUpdate(t *testing.T) {
+	testCases := []struct {
+		description        string
+		oldPod             *api.Pod
+		newPod             *api.Pod
+		expectedGeneration int64
+	}{
+		{
+			description: "pod not updated",
+			oldPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "pod-not-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{newContainer("container", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi"))},
+				},
+			},
+			newPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "pod-not-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{newContainer("container", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi"))},
+				},
+			},
+			expectedGeneration: 1,
+		},
+		{
+			description: "only metadata change",
+			oldPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "only-metadata-change",
+					Generation:  1,
+					Annotations: map[string]string{"foo": "bar"},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{newContainer("container", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi"))},
+				},
+			},
+			newPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        "only-metadata-change",
+					Generation:  1,
+					Annotations: map[string]string{"foo": "baz"},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{newContainer("container", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi"))},
+				},
+			},
+			expectedGeneration: 1,
+		},
+		{
+			description: "spec semantically equal",
+			oldPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "spec-semantically-equal",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					Tolerations: []api.Toleration{},
+				},
+			},
+			newPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "spec-semantically-equal",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{},
+				},
+			},
+			expectedGeneration: 1,
+		},
+		{
+			description: "tolerations updated",
+			oldPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "tolerations-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{},
+			},
+			newPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "tolerations-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					Tolerations: []api.Toleration{{
+						Key:   "toleration-key",
+						Value: "toleration-value",
+					}},
+				},
+			},
+			expectedGeneration: 2,
+		},
+		{
+			description: "generation not set",
+			oldPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "gen-not-set",
+				},
+				Spec: api.PodSpec{},
+			},
+			newPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: "gen-not-set",
+				},
+				Spec: api.PodSpec{},
+			},
+			expectedGeneration: 0,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.description, func(t *testing.T) {
+			Strategy.PrepareForUpdate(genericapirequest.NewContext(), tc.newPod, tc.oldPod)
+			actual := tc.newPod.Generation
+			if actual != tc.expectedGeneration {
+				t.Errorf("invalid generation for pod %s, expected: %d, actual: %d", tc.oldPod.Name, tc.expectedGeneration, actual)
+			}
+		})
+	}
+}
+
+func TestEphemeralContainersPrepareForUpdate(t *testing.T) {
+	testCases := []struct {
+		description        string
+		oldPod             *api.Pod
+		newPod             *api.Pod
+		expectedGeneration int64
+	}{
+		{
+			description: "pod not updated",
+			oldPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "pod-not-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{newContainer("container", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi"))},
+				},
+			},
+			newPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "pod-not-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{newContainer("container", getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi"))},
+				},
+			},
+			expectedGeneration: 1,
+		},
+		{
+			description: "ephemeral containers updated",
+			oldPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "ephemeral-containers-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{},
+			},
+			newPod: &api.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "ephemeral-containers-updated",
+					Generation: 1,
+				},
+				Spec: api.PodSpec{
+					EphemeralContainers: []api.EphemeralContainer{{
+						EphemeralContainerCommon: api.EphemeralContainerCommon{Name: "ephemeral-container"},
+					}},
+				},
+			},
+			expectedGeneration: 2,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.description, func(t *testing.T) {
+			EphemeralContainersStrategy.PrepareForUpdate(genericapirequest.NewContext(), tc.newPod, tc.oldPod)
+			actual := tc.newPod.Generation
+			if actual != tc.expectedGeneration {
+				t.Errorf("invalid generation for pod %s, expected: %d, actual: %d", tc.oldPod.Name, tc.expectedGeneration, actual)
+			}
+		})
+	}
+}
@@ -63,6 +63,7 @@ var _ = SIGDescribe("Ephemeral Containers", framework.WithNodeConformance(), fun
 				},
 			},
 		})
+		gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(1))
 
 		ginkgo.By("adding an ephemeral container")
 		ecName := "debugger"
@@ -78,6 +79,11 @@ var _ = SIGDescribe("Ephemeral Containers", framework.WithNodeConformance(), fun
 		err := podClient.AddEphemeralContainerSync(ctx, pod, ec, time.Minute)
 		framework.ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", e2epod.FormatPod(pod))
 
+		ginkgo.By("verifying the pod's generation is 2")
+		pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err, "failed to query for pod")
+		gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(2))
+
 		ginkgo.By("checking pod container endpoints")
 		// Can't use anything depending on kubectl here because it's not available in the node test environment
 		output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco")
@@ -110,6 +116,7 @@ var _ = SIGDescribe("Ephemeral Containers", framework.WithNodeConformance(), fun
 				},
 			},
 		})
+		gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(1))
 
 		ginkgo.By("adding an ephemeral container")
 		ecName := "debugger"
@@ -125,6 +132,11 @@ var _ = SIGDescribe("Ephemeral Containers", framework.WithNodeConformance(), fun
 		err := podClient.AddEphemeralContainerSync(ctx, pod, ec, time.Minute)
 		framework.ExpectNoError(err, "Failed to patch ephemeral containers in pod %q", e2epod.FormatPod(pod))
 
+		ginkgo.By("verifying the pod's generation is 2")
+		pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err, "failed to query for pod")
+		gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(2))
+
 		ginkgo.By("checking pod container endpoints")
 		// Can't use anything depending on kubectl here because it's not available in the node test environment
 		output := e2epod.ExecCommandInContainer(f, pod.Name, ecName, "/bin/echo", "marco")
@@ -275,17 +275,20 @@ func doPodResizeSchedulerTests(f *framework.Framework) {
 		ginkgo.By(fmt.Sprintf("TEST1: Create pod '%s' that fits the node '%s'", testPod1.Name, node.Name))
 		testPod1 = podClient.CreateSync(ctx, testPod1)
 		gomega.Expect(testPod1.Status.Phase).To(gomega.Equal(v1.PodRunning))
+		gomega.Expect(testPod1.Generation).To(gomega.BeEquivalentTo(1))
 
 		ginkgo.By(fmt.Sprintf("TEST1: Create pod '%s' that won't fit node '%s' with pod '%s' on it", testPod2.Name, node.Name, testPod1.Name))
 		testPod2 = podClient.Create(ctx, testPod2)
 		err = e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, testPod2.Name, testPod2.Namespace)
 		framework.ExpectNoError(err)
 		gomega.Expect(testPod2.Status.Phase).To(gomega.Equal(v1.PodPending))
+		gomega.Expect(testPod2.Generation).To(gomega.BeEquivalentTo(1))
 
 		ginkgo.By(fmt.Sprintf("TEST1: Resize pod '%s' to fit in node '%s'", testPod2.Name, node.Name))
 		testPod2, pErr := f.ClientSet.CoreV1().Pods(testPod2.Namespace).Patch(ctx,
 			testPod2.Name, types.StrategicMergePatchType, []byte(patchTestpod2ToFitNode), metav1.PatchOptions{}, "resize")
 		framework.ExpectNoError(pErr, "failed to patch pod for resize")
+		gomega.Expect(testPod2.Generation).To(gomega.BeEquivalentTo(2))
 
 		ginkgo.By(fmt.Sprintf("TEST1: Verify that pod '%s' is running after resize", testPod2.Name))
 		framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, testPod2))
@@ -329,11 +332,13 @@ func doPodResizeSchedulerTests(f *framework.Framework) {
 		p3Err := e2epod.WaitForPodNameUnschedulableInNamespace(ctx, f.ClientSet, testPod3.Name, testPod3.Namespace)
 		framework.ExpectNoError(p3Err, "failed to create pod3 or pod3 did not become pending!")
 		gomega.Expect(testPod3.Status.Phase).To(gomega.Equal(v1.PodPending))
+		gomega.Expect(testPod3.Generation).To(gomega.BeEquivalentTo(1))
 
 		ginkgo.By(fmt.Sprintf("TEST2: Resize pod '%s' to make enough space for pod '%s'", testPod1.Name, testPod3.Name))
 		testPod1, p1Err := f.ClientSet.CoreV1().Pods(testPod1.Namespace).Patch(ctx,
 			testPod1.Name, types.StrategicMergePatchType, []byte(patchTestpod1ToMakeSpaceForPod3), metav1.PatchOptions{}, "resize")
 		framework.ExpectNoError(p1Err, "failed to patch pod for resize")
+		gomega.Expect(testPod1.Generation).To(gomega.BeEquivalentTo(2))
 
 		ginkgo.By(fmt.Sprintf("TEST2: Verify pod '%s' is running after successfully resizing pod '%s'", testPod3.Name, testPod1.Name))
 		framework.Logf("TEST2: Pod '%s' CPU requests '%dm'", testPod1.Name, testPod1.Spec.Containers[0].Resources.Requests.Cpu().MilliValue())
@@ -415,6 +415,147 @@ var _ = SIGDescribe("Pods Extended", func() {
 		})
 	})
 
+	ginkgo.Describe("Pod Generation", func() {
+		var podClient *e2epod.PodClient
+		ginkgo.BeforeEach(func() {
+			podClient = e2epod.NewPodClient(f)
+		})
+
+		ginkgo.It("pod generation should start at 1 and increment per update", func(ctx context.Context) {
+			ginkgo.By("creating the pod")
+			podName := "pod-generation-" + string(uuid.NewUUID())
+			pod := e2epod.NewAgnhostPod(f.Namespace.Name, podName, nil, nil, nil)
+			pod.Spec.InitContainers = []v1.Container{{
+				Name:  "init-container",
+				Image: imageutils.GetE2EImage(imageutils.BusyBox),
+			}}
+
+			ginkgo.By("submitting the pod to kubernetes")
+			pod = podClient.CreateSync(ctx, pod)
+			gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(1))
+			ginkgo.DeferCleanup(func(ctx context.Context) error {
+				ginkgo.By("deleting the pod")
+				return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
+			})
+
+			ginkgo.By("verifying pod generation bumps as expected")
+			tests := []struct {
+				name                 string
+				updateFn             func(*v1.Pod)
+				expectGenerationBump bool
+			}{
+				{
+					name:                 "empty update",
+					updateFn:             func(pod *v1.Pod) {},
+					expectGenerationBump: false,
+				},
+				{
+					name: "updating Tolerations to trigger generation bump",
+					updateFn: func(pod *v1.Pod) {
+						pod.Spec.Tolerations = []v1.Toleration{
+							{
+								Key:      "foo-" + string(uuid.NewUUID()),
+								Operator: v1.TolerationOpEqual,
+								Value:    "bar",
+								Effect:   v1.TaintEffectNoSchedule,
+							},
+						}
+					},
+					expectGenerationBump: true,
+				},
+				{
+					name: "updating ActiveDeadlineSeconds to trigger generation bump",
+					updateFn: func(pod *v1.Pod) {
+						int5000 := int64(5000)
+						pod.Spec.ActiveDeadlineSeconds = &int5000
+					},
+					expectGenerationBump: true,
+				},
+				{
+					name: "updating container image to trigger generation bump",
+					updateFn: func(pod *v1.Pod) {
+						pod.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.Nginx)
+					},
+					expectGenerationBump: true,
+				},
+				{
+					name: "updating initContainer image to trigger generation bump",
+					updateFn: func(pod *v1.Pod) {
+						pod.Spec.InitContainers[0].Image = imageutils.GetE2EImage(imageutils.Pause)
+					},
+					expectGenerationBump: true,
+				},
+				{
+					name: "updates to pod metadata should not trigger generation bump",
+					updateFn: func(pod *v1.Pod) {
+						pod.SetAnnotations(map[string]string{"key": "value"})
+					},
+					expectGenerationBump: false,
+				},
+				{
+					name: "pod generation updated by client should be ignored",
+					updateFn: func(pod *v1.Pod) {
+						pod.SetGeneration(1)
+					},
+					expectGenerationBump: false,
+				},
+			}
+
+			expectedPodGeneration := 1
+			for _, test := range tests {
+				ginkgo.By(test.name)
+				podClient.Update(ctx, podName, test.updateFn)
+				pod, err := podClient.Get(ctx, podName, metav1.GetOptions{})
+				framework.ExpectNoError(err, "failed to query for pod")
+				if test.expectGenerationBump {
+					expectedPodGeneration++
+				}
+				gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(expectedPodGeneration))
+			}
+		})
+
+		ginkgo.It("custom-set generation on new pods should be overwritten to 1", func(ctx context.Context) {
+			ginkgo.By("creating the pod")
+			name := "pod-generation-" + string(uuid.NewUUID())
+			value := strconv.Itoa(time.Now().Nanosecond())
+			pod := e2epod.NewAgnhostPod(f.Namespace.Name, name, nil, nil, nil)
+			pod.ObjectMeta.Labels = map[string]string{
+				"time": value,
+			}
+			pod.SetGeneration(100)
+
+			ginkgo.By("submitting the pod to kubernetes")
+			pod = podClient.CreateSync(ctx, pod)
+
+			ginkgo.By("verifying the new pod's generation is 1")
+			gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(1))
+
+			ginkgo.By("issue a graceful delete to trigger generation bump")
+			// We need to wait for the pod to be running, otherwise the deletion
+			// may be carried out immediately rather than gracefully.
+			framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name))
+			pod, err := podClient.Get(ctx, pod.Name, metav1.GetOptions{})
+			framework.ExpectNoError(err, "failed to GET scheduled pod")
+
+			var lastPod v1.Pod
+			var statusCode int
+			// Set gracePeriodSeconds to 60 to give us time to verify the generation bump.
+			err = f.ClientSet.CoreV1().RESTClient().Delete().AbsPath("/api/v1/namespaces", pod.Namespace, "pods", pod.Name).Param("gracePeriodSeconds", "60").Do(ctx).StatusCode(&statusCode).Into(&lastPod)
+			framework.ExpectNoError(err, "failed to use http client to send delete")
+			gomega.Expect(statusCode).To(gomega.Equal(http.StatusOK), "failed to delete gracefully by client request")
+
+			ginkgo.By("verifying the pod generation was bumped")
+			pod, err = podClient.Get(ctx, pod.Name, metav1.GetOptions{})
+			framework.ExpectNoError(err, "failed to query for pod")
+			gomega.Expect(pod.Generation).To(gomega.BeEquivalentTo(2))
+		})
+	})
 })
 
 func createAndTestPodRepeatedly(ctx context.Context, workers, iterations int, scenario podScenario, podClient v1core.PodInterface) {
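The behavior these e2e cases assert is observable with plain client-go as well; a hedged sketch (the helper name and wiring are assumptions, the API calls are standard client-go):

    package main

    import (
    	"context"
    	"fmt"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    )

    // printGeneration fetches a pod and reports its server-managed generation:
    // 1 right after creation, +1 after every spec-changing update.
    func printGeneration(ctx context.Context, cs kubernetes.Interface, ns, name string) error {
    	pod, err := cs.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
    	if err != nil {
    		return err
    	}
    	fmt.Printf("pod %s/%s generation=%d\n", ns, name, pod.Generation)
    	return nil
    }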