Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00
Create ReplicaSetScale test
This commit is contained in:
parent 34f138ff83
commit c721d8cf65
@@ -18,10 +18,14 @@ package apps
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"time"
 
 	appsv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+
+	"k8s.io/apimachinery/pkg/types"
 
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/resource"
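The three new imports serve the scale-subresource test added further down: encoding/json and autoscalingv1 build the patch payload, and types supplies the strategic-merge-patch constant. As a point of reference, a minimal standalone sketch (not part of the commit) of what that payload marshals to:

package main

import (
	"encoding/json"
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
)

func main() {
	// Marshal a Scale object the same way the test does. Only spec.replicas
	// carries information for the patch; the metadata and status fields
	// marshal as zero values and are ignored by the strategic merge patch.
	payload, err := json.Marshal(autoscalingv1.Scale{
		Spec: autoscalingv1.ScaleSpec{Replicas: 4},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}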
@@ -114,6 +118,10 @@ var _ = SIGDescribe("ReplicaSet", func() {
 	framework.ConformanceIt("should adopt matching pods on creation and release no longer matching pods", func() {
 		testRSAdoptMatchingAndReleaseNotMatching(f)
 	})
+
+	ginkgo.It("Replicaset should have a working scale subresource", func() {
+		testRSScaleSubresources(f)
+	})
 })
 
 // A basic test to check the deployment of an image using a ReplicaSet. The
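Note that the new spec is registered with plain ginkgo.It rather than framework.ConformanceIt, so it carries no [Conformance] tag and stays out of conformance runs. For context, ConformanceIt is a thin wrapper over ginkgo.It along these lines (a sketch of the relationship, not the framework's exact source):

package framework

import "github.com/onsi/ginkgo"

// Sketch: ConformanceIt registers a spec exactly like ginkgo.It, but appends
// the [Conformance] tag so conformance-focused runs can select it.
func ConformanceIt(text string, body interface{}, timeout ...float64) bool {
	return ginkgo.It(text+" [Conformance]", body, timeout...)
}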
@@ -340,3 +348,67 @@ func testRSAdoptMatchingAndReleaseNotMatching(f *framework.Framework) {
 	})
 	framework.ExpectNoError(err)
 }
+
+func testRSScaleSubresources(f *framework.Framework) {
+	ns := f.Namespace.Name
+	c := f.ClientSet
+
+	// Create webserver pods.
+	rsPodLabels := map[string]string{
+		"name": "sample-pod",
+		"pod":  WebserverImageName,
+	}
+
+	rsName := "test-rs"
+	replicas := int32(1)
+	ginkgo.By(fmt.Sprintf("Creating replica set %q", rsName))
+	rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
+	_, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
+
+	// Verify that the required pods have come up.
+	err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
+	framework.ExpectNoError(err, "error in waiting for pods to come up: %s", err)
+
+	ginkgo.By("getting scale subresource")
+	scale, err := c.AppsV1().ReplicaSets(ns).GetScale(context.TODO(), rsName, metav1.GetOptions{})
+	if err != nil {
+		framework.Failf("Failed to get scale subresource: %v", err)
+	}
+	framework.ExpectEqual(scale.Spec.Replicas, int32(1))
+	framework.ExpectEqual(scale.Status.Replicas, int32(1))
+
+	ginkgo.By("updating a scale subresource")
+	scale.ResourceVersion = "" // indicate the scale update should be unconditional
+	scale.Spec.Replicas = 2
+	scaleResult, err := c.AppsV1().ReplicaSets(ns).UpdateScale(context.TODO(), rsName, scale, metav1.UpdateOptions{})
+	if err != nil {
+		framework.Failf("Failed to update scale subresource: %v", err)
+	}
+	framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
+
+	ginkgo.By("verifying the replicaset Spec.Replicas was modified")
+	rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
+	if err != nil {
+		framework.Failf("Failed to get replicaset resource: %v", err)
+	}
+	framework.ExpectEqual(*(rs.Spec.Replicas), int32(2))
+
+	ginkgo.By("Patch a scale subresource")
+	scale.ResourceVersion = "" // indicate the scale update should be unconditional
+	scale.Spec.Replicas = 4 // should be 2 after "UpdateScale" operation, now Patch to 4
+	rsScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{
+		Spec: autoscalingv1.ScaleSpec{
+			Replicas: scale.Spec.Replicas,
+		},
+	})
+	framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
+
+	_, err = c.AppsV1().ReplicaSets(ns).Patch(context.TODO(), rsName, types.StrategicMergePatchType, rsScalePatchPayload, metav1.PatchOptions{}, "scale")
+	framework.ExpectNoError(err, "Failed to patch replicaset: %v", err)
+
+	rs, err = c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
+	framework.ExpectNoError(err, "Failed to get replicaset resource: %v", err)
+	framework.ExpectEqual(*(rs.Spec.Replicas), int32(4), "replicaset should have 4 replicas")
+
+}
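The same scale-subresource round trip can be driven outside the e2e framework with plain client-go. A minimal sketch, assuming a reachable cluster, a kubeconfig in the default location, and a ReplicaSet named "test-rs" in the "default" namespace (the namespace and kubeconfig path are assumptions, not part of the commit):

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the default kubeconfig path (an assumption).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	c, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	rsClient := c.AppsV1().ReplicaSets("default")

	// Read the scale subresource, as the test's "getting scale subresource" step does.
	scale, err := rsClient.GetScale(context.TODO(), "test-rs", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("spec.replicas=%d status.replicas=%d\n", scale.Spec.Replicas, scale.Status.Replicas)

	// Write it back with a new replica count, mirroring the test's UpdateScale step.
	scale.Spec.Replicas = 2
	if _, err := rsClient.UpdateScale(context.TODO(), "test-rs", scale, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
}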