Create ReplicaSetScale test

Riaan Kleinhans 2021-02-03 10:34:28 +13:00
parent 34f138ff83
commit c721d8cf65


@@ -18,10 +18,14 @@ package apps
import (
"context"
"encoding/json"
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/apimachinery/pkg/types"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -114,6 +118,10 @@ var _ = SIGDescribe("ReplicaSet", func() {
framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func() {
testRSAdoptMatchingAndReleaseNotMatching(f)
})
ginkgo.It("Replicaset should have a working scale subresource", func() {
testRSScaleSubresources(f)
})
})
// A basic test to check the deployment of an image using a ReplicaSet. The
@@ -340,3 +348,67 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
})
framework.ExpectNoError(err)
}
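
// testRSScaleSubresources verifies the scale subresource of a ReplicaSet:
// it reads the current scale with GetScale, updates it with UpdateScale,
// and then modifies it again with a patch against the scale subresource.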
func testRSScaleSubresources(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
// Create webserver pods.
rsPodLabels := map[string]string{
"name": "sample-pod",
"pod": WebserverImageName,
}
rsName := "test-rs"
replicas := int32(1)
ginkgo.By(fmt.Sprintf("Creating replica set %q that asks for more than the allowed pod quota", rsName))
rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
framework.ExpectNoError(err)
// Verify that the required pods have come up.
err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().ReplicaSets(ns).GetScale(context.TODO(), rsName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get scale subresource: %v", err)
}
framework.ExpectEqual(scale.Spec.Replicas, int32(1))
framework.ExpectEqual(scale.Status.Replicas, int32(1))
ginkgo.By("updating a scale subresource")
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rsName, scale, metav1.UpdateOptions{})
if err != nil {
framework.Failf("Failed to put scale subresource: %v", err)
}
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
ginkgo.By("verifying the replicaset Spec.Replicas was modified")
rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get statefulset resource: %v", err)
}
framework.ExpectEqual(*(rs.Spec.Replicas), int32(2))
ginkgo.By("Patch a scale subresource")
scale.Spec.Replicas = 4 // was set to 2 by the UpdateScale call above; now patch it to 4
rsScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{
Spec: autoscalingv1.ScaleSpec{
Replicas: scale.Spec.Replicas,
},
})
framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
_, err = c.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, rsScalePatchPayload, metav1.PatchOptions{}, "scale")
framework.ExpectNoError(err, "Failed to patch replicaset")
rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err)
framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas")
}