test(cr::update): add corresponding tests
Signed-off-by: knight42 <anonymousknight96@gmail.com>
commit da246010c3 (parent 34de08a3c9)
@@ -17,6 +17,8 @@ limitations under the License.
package customresource_test

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"testing"
@@ -526,6 +528,184 @@ func TestScaleUpdateWithoutSpecReplicas(t *testing.T) {
	}
}

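// TestScaleUpdateWithoutResourceVersion verifies that a scale update submitted
// without a resourceVersion is applied unconditionally: the new replica count
// must be persisted even though no precondition was supplied.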
func TestScaleUpdateWithoutResourceVersion(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.CustomResource.Store.DestroyFunc()

	name := "foo"

	var cr unstructured.Unstructured
	ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault)
	key := "/noxus/" + metav1.NamespaceDefault + "/" + name
	if err := storage.CustomResource.Storage.Create(ctx, key, &validCustomResource, &cr, 0, false); err != nil {
		t.Fatalf("error setting new custom resource (key: %s) %v: %v", key, validCustomResource, err)
	}

	replicas := int32(8)
	update := autoscalingv1.Scale{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Spec: autoscalingv1.ScaleSpec{
			Replicas: replicas,
		},
	}

	if _, _, err := storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil {
		t.Fatalf("error updating scale %v: %v", update, err)
	}

	obj, err := storage.Scale.Get(ctx, name, &metav1.GetOptions{})
	if err != nil {
		t.Fatalf("error fetching scale for %s: %v", name, err)
	}
	scale := obj.(*autoscalingv1.Scale)
	if scale.Spec.Replicas != replicas {
		t.Errorf("wrong replicas count: expected: %d got: %d", replicas, scale.Spec.Replicas)
	}
}

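// TestScaleUpdateWithoutResourceVersionWithConflicts runs two writers against
// the same custom resource: a goroutine that repeatedly patches a label, and
// the test body which repeatedly submits resourceVersion-less scale updates.
// Each writer verifies its own update took effect, so the test fails if either
// write is lost to the concurrent updates.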
func TestScaleUpdateWithoutResourceVersionWithConflicts(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.CustomResource.Store.DestroyFunc()

	name := "foo"

	var cr unstructured.Unstructured
	ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault)
	key := "/noxus/" + metav1.NamespaceDefault + "/" + name
	if err := storage.CustomResource.Storage.Create(ctx, key, &validCustomResource, &cr, 0, false); err != nil {
		t.Fatalf("error setting new custom resource (key: %s) %v: %v", key, validCustomResource, err)
	}

	fetchObject := func(name string) (*unstructured.Unstructured, error) {
		gotObj, err := storage.CustomResource.Get(ctx, name, &metav1.GetOptions{})
		if err != nil {
			return nil, fmt.Errorf("error fetching custom resource %s: %v", name, err)
		}
		return gotObj.(*unstructured.Unstructured), nil
	}

	applyPatch := func(labelName, labelValue string) rest.TransformFunc {
		return func(_ context.Context, _, currentObject runtime.Object) (objToUpdate runtime.Object, patchErr error) {
			o := currentObject.(metav1.Object)
			o.SetLabels(map[string]string{
				labelName: labelValue,
			})
			return currentObject, nil
		}
	}

	errs := make(chan error, 1)
	rounds := 100
	go func() {
		// continuously submits a patch that updates a label and verifies the label update was effective
		labelName := "timestamp"
		for i := 0; i < rounds; i++ {
			expectedLabelValue := fmt.Sprint(i)
			update, err := fetchObject(name)
			if err != nil {
				errs <- err
				return
			}
			setNestedField(update, expectedLabelValue, "metadata", "labels", labelName)
			if _, _, err := storage.CustomResource.Update(ctx, name, rest.DefaultUpdatedObjectInfo(nil, applyPatch(labelName, fmt.Sprint(i))), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil {
				errs <- fmt.Errorf("error updating custom resource label: %v", err)
				return
			}

			gotObj, err := fetchObject(name)
			if err != nil {
				errs <- err
				return
			}
			gotLabelValue, _, err := unstructured.NestedString(gotObj.Object, "metadata", "labels", labelName)
			if err != nil {
				errs <- fmt.Errorf("error getting label %s of custom resource %s: %v", labelName, name, err)
				return
			}
			if gotLabelValue != expectedLabelValue {
				errs <- fmt.Errorf("wrong label value: expected: %s, got: %s", expectedLabelValue, gotLabelValue)
				return
			}
		}
	}()

	replicas := int32(0)
	update := autoscalingv1.Scale{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	}
	// continuously submits a scale update without a resourceVersion for a monotonically increasing replica value
	// and verifies the scale update was effective
	for i := 0; i < rounds; i++ {
		select {
		case err := <-errs:
			t.Fatal(err)
		default:
			replicas++
			update.Spec.Replicas = replicas
			if _, _, err := storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{}); err != nil {
				t.Fatalf("error updating scale %v: %v", update, err)
			}

			obj, err := storage.Scale.Get(ctx, name, &metav1.GetOptions{})
			if err != nil {
				t.Fatalf("error fetching scale for %s: %v", name, err)
			}
			scale := obj.(*autoscalingv1.Scale)
			if scale.Spec.Replicas != replicas {
				t.Errorf("wrong replicas count: expected: %d got: %d", replicas, scale.Spec.Replicas)
			}
		}
	}
}

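// TestScaleUpdateWithResourceVersionWithConflicts verifies that a scale update
// carrying a resourceVersion that no longer matches the stored object is
// rejected with a conflict error rather than applied.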
func TestScaleUpdateWithResourceVersionWithConflicts(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.CustomResource.Store.DestroyFunc()

	name := "foo"

	var cr unstructured.Unstructured
	ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), metav1.NamespaceDefault)
	key := "/noxus/" + metav1.NamespaceDefault + "/" + name
	if err := storage.CustomResource.Storage.Create(ctx, key, &validCustomResource, &cr, 0, false); err != nil {
		t.Fatalf("error setting new custom resource (key: %s) %v: %v", key, validCustomResource, err)
	}

	obj, err := storage.Scale.Get(ctx, name, &metav1.GetOptions{})
	if err != nil {
		t.Fatalf("error fetching scale for %s: %v", name, err)
	}
	scale, ok := obj.(*autoscalingv1.Scale)
	if !ok {
		t.Fatalf("%v is not of the type autoscalingv1.Scale", scale)
	}

	replicas := int32(12)
	update := autoscalingv1.Scale{
		ObjectMeta: scale.ObjectMeta,
		Spec: autoscalingv1.ScaleSpec{
			Replicas: replicas,
		},
	}
	update.ResourceVersion = "1"

	_, _, err = storage.Scale.Update(ctx, update.Name, rest.DefaultUpdatedObjectInfo(&update), rest.ValidateAllObjectFunc, rest.ValidateAllObjectUpdateFunc, false, &metav1.UpdateOptions{})
	if err == nil {
		t.Fatal("expecting an update conflict error")
	}
	if !errors.IsConflict(err) {
		t.Fatalf("unexpected error, expecting an update conflict but got %v", err)
	}
}

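// setSpecReplicas sets .spec.replicas on an unstructured custom resource.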
func setSpecReplicas(u *unstructured.Unstructured, replicas int64) {
	setNestedField(u, replicas, "spec", "replicas")
}
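Note: setNestedField, called by the new tests and by setSpecReplicas above, is defined outside the hunks shown here. As an illustrative sketch only (not the committed implementation), a package-level test helper of this shape, wrapping unstructured.SetNestedField from k8s.io/apimachinery and ignoring its error for brevity, would satisfy those call sites:

// setNestedField is a hypothetical sketch of the helper assumed by the calls
// above; the real definition in the package may differ.
func setNestedField(u *unstructured.Unstructured, value interface{}, fields ...string) {
	if u.Object == nil {
		u.Object = make(map[string]interface{})
	}
	// unstructured.SetNestedField returns an error for unsupported value types;
	// a test-only helper like this one simply discards it.
	unstructured.SetNestedField(u.Object, value, fields...)
}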