Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 19:56:01 +00:00
Deflake TestSubresourcePatch

parent ebbbc57540
commit 6ca6565274
@@ -772,34 +772,35 @@ func TestSubresourcePatch(t *testing.T) {
 	expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 999, "status", "num") // .status.num should be 999
 	expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 10, "spec", "num") // .spec.num should remain 10
 
-	rv, found, err := unstructured.NestedString(patchedNoxuInstance.UnstructuredContent(), "metadata", "resourceVersion")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if !found {
-		t.Fatalf("metadata.resourceVersion not found")
-	}
-
-	// this call waits for the resourceVersion to be reached in the cache before returning.
-	// We need to do this because the patch gets its initial object from the storage, and the cache serves that.
-	// If it is out of date, then our initial patch is applied to an old resource version, which conflicts
-	// and then the updated object shows a conflicting diff, which permanently fails the patch.
-	// This gives expected stability in the patch without retrying on an known number of conflicts below in the test.
-	// See https://issue.k8s.io/42644
-	_, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{ResourceVersion: patchedNoxuInstance.GetResourceVersion()})
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-
 	// no-op patch
-	t.Logf("Patching .status.num again to 999")
-	patchedNoxuInstance, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, patch, metav1.PatchOptions{}, "status")
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
-	// make sure no-op patch does not increment resourceVersion
-	expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 999, "status", "num")
-	expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 10, "spec", "num")
+	rv := ""
+	found := false
+	// TODO: remove this retry once http://issue.k8s.io/75564 is resolved, and expect the resourceVersion to remain unchanged 100% of the time.
+	// server-side-apply incorrectly considers spec fields in patches submitted to /status when updating managedFields timestamps, so this patch is racy:
+	// if it spans a 1-second boundary from the last write, server-side-apply updates the managedField timestamp and increments resourceVersion.
+	for i := 0; i < 10; i++ {
+		rv, found, err = unstructured.NestedString(patchedNoxuInstance.UnstructuredContent(), "metadata", "resourceVersion")
+		if err != nil {
+			t.Fatal(err)
+		}
+		if !found {
+			t.Fatalf("metadata.resourceVersion not found")
+		}
+
+		t.Logf("Patching .status.num again to 999")
+		patchedNoxuInstance, err = noxuResourceClient.Patch(context.TODO(), "foo", types.MergePatchType, patch, metav1.PatchOptions{}, "status")
+		if err != nil {
+			t.Fatalf("unexpected error: %v", err)
+		}
+		// make sure no-op patch does not increment resourceVersion
+		expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 999, "status", "num")
+		expectInt64(t, patchedNoxuInstance.UnstructuredContent(), 10, "spec", "num")
+		if patchedNoxuInstance.GetResourceVersion() == rv {
+			break
+		}
+		t.Logf("resource version changed - retrying")
+	}
 	expectString(t, patchedNoxuInstance.UnstructuredContent(), rv, "metadata", "resourceVersion")
 
 	// empty patch
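The hunk above is the core of the deflake: rather than asserting once that the no-op status patch leaves metadata.resourceVersion unchanged, the test now retries up to ten times, tolerating the racy managedFields timestamp bump described in the TODO. The following is a minimal, standalone sketch of that retry pattern, not the test code itself; retryUntilNoOp, patchResult, and the simulated server in main are hypothetical names invented for illustration.

package main

import "fmt"

// patchResult stands in for the unstructured object a real client returns;
// only the resourceVersion matters for this pattern.
type patchResult struct {
	resourceVersion string
}

// retryUntilNoOp invokes patchFn up to maxAttempts times and reports whether
// two consecutive attempts returned the same resourceVersion, i.e. the write
// was observed to be a true no-op. In the test above, the equivalent of
// patchFn is the noxuResourceClient.Patch call against the /status subresource.
func retryUntilNoOp(maxAttempts int, patchFn func() (patchResult, error)) (patchResult, bool, error) {
	var last patchResult
	prevRV := ""
	for i := 0; i < maxAttempts; i++ {
		obj, err := patchFn()
		if err != nil {
			return patchResult{}, false, err
		}
		if obj.resourceVersion == prevRV {
			return obj, true, nil // resourceVersion unchanged: no-op confirmed
		}
		prevRV = obj.resourceVersion
		last = obj
	}
	return last, false, nil // never stabilized within maxAttempts
}

func main() {
	// Simulate a server that bumps resourceVersion once (the racy
	// managedFields timestamp update) before settling into a true no-op.
	responses := []string{"101", "102", "102"}
	i := 0
	patch := func() (patchResult, error) {
		r := patchResult{resourceVersion: responses[i]}
		if i < len(responses)-1 {
			i++
		}
		return r, nil
	}
	obj, stable, err := retryUntilNoOp(10, patch)
	fmt.Printf("rv=%s stable=%v err=%v\n", obj.resourceVersion, stable, err) // rv=102 stable=true err=<nil>
}

The bound of ten attempts mirrors the loop in the hunk; since each spurious resourceVersion bump requires crossing another 1-second boundary since the last write, a handful of retries suffices in practice.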
@@ -831,17 +832,6 @@ func TestSubresourcePatch(t *testing.T) {
 		t.Fatalf("metadata.resourceVersion not found")
 	}
-
-	// this call waits for the resourceVersion to be reached in the cache before returning.
-	// We need to do this because the patch gets its initial object from the storage, and the cache serves that.
-	// If it is out of date, then our initial patch is applied to an old resource version, which conflicts
-	// and then the updated object shows a conflicting diff, which permanently fails the patch.
-	// This gives expected stability in the patch without retrying on an known number of conflicts below in the test.
-	// See https://issue.k8s.io/42644
-	_, err = noxuResourceClient.Get(context.TODO(), "foo", metav1.GetOptions{ResourceVersion: patchedNoxuInstance.GetResourceVersion()})
-	if err != nil {
-		t.Fatalf("unexpected error: %v", err)
-	}
 
 	// Scale.Spec.Replicas = 7 but Scale.Status.Replicas should remain 0
 	gottenScale, err := scaleClient.Scales("not-the-default").Get(context.TODO(), groupResource, "foo", metav1.GetOptions{})
 	if err != nil {
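Both hunks also delete the cache-consistency wait that preceded these reads. Per its own comment, the deleted block boiled down to a single GET pinned to an explicit ResourceVersion, which does not return until the watch cache has reached that revision (see https://issue.k8s.io/42644). Below is a minimal sketch of that pattern, assuming a client-go dynamic.ResourceInterface like the test's noxuResourceClient; waitForFreshCache is a hypothetical helper name:

package subresourcetest

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/dynamic"
)

// waitForFreshCache issues a read pinned to a known resourceVersion. As the
// deleted comment explains, the apiserver holds such a request until its
// watch cache has caught up to that resourceVersion, so a subsequent patch
// starts from fresh state instead of hitting a permanent conflict.
func waitForFreshCache(ctx context.Context, client dynamic.ResourceInterface, name, rv string) error {
	_, err := client.Get(ctx, name, metav1.GetOptions{ResourceVersion: rv})
	return err
}

With the retry loop in place, a stale initial read presumably just surfaces as one more changed resourceVersion and gets retried, so this pre-read is no longer needed.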