Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #98988 from ii/Riaankl_DeploymentScale
Write Read, Replace and Patch DeploymentScale test +3 endpoints
Commit 67fbf4172d
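For orientation, the "3 endpoints" in the title are the read, replace, and patch operations on a Deployment's scale subresource. The sketch below is illustrative only and not part of the commit (the package name and the exerciseScaleSubresource function are made up); it maps each typed client-go call used by the new test to the HTTP request it issues:

// scale_endpoints_sketch.go - illustrative sketch, not part of this commit.
package sketch

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// exerciseScaleSubresource shows which scale-subresource endpoint each call reaches.
func exerciseScaleSubresource(ctx context.Context, c kubernetes.Interface, ns, name string) error {
	// Read:    GET   /apis/apps/v1/namespaces/{ns}/deployments/{name}/scale
	scale, err := c.AppsV1().Deployments(ns).GetScale(ctx, name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	// Replace: PUT   /apis/apps/v1/namespaces/{ns}/deployments/{name}/scale
	scale.Spec.Replicas = 2
	if _, err := c.AppsV1().Deployments(ns).UpdateScale(ctx, name, scale, metav1.UpdateOptions{}); err != nil {
		return err
	}

	// Patch:   PATCH /apis/apps/v1/namespaces/{ns}/deployments/{name}/scale
	patch := []byte(`{"spec":{"replicas":4}}`)
	_, err = c.AppsV1().Deployments(ns).Patch(ctx, name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "scale")
	return err
}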
@@ -18,6 +18,7 @@ package apps
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"math/rand"
 	"time"
@@ -28,8 +29,8 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/client-go/tools/cache"
 
-	"encoding/json"
 	appsv1 "k8s.io/api/apps/v1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1" //Added new
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -132,6 +133,9 @@ var _ = SIGDescribe("Deployment", func() {
 	ginkgo.It("test Deployment ReplicaSet orphaning and adoption regarding controllerRef", func() {
 		testDeploymentsControllerRef(f)
 	})
+	ginkgo.It("Deployment should have a working scale subresource", func() {
+		testDeploymentSubresources(f)
+	})
 	/*
 	  Release: v1.12
 	  Testname: Deployment Proportional Scaling
@@ -1476,3 +1480,66 @@ func waitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *appsv1.D
 func waitForDeploymentUpdatedReplicasGTE(c clientset.Interface, ns, deploymentName string, minUpdatedReplicas int32, desiredGeneration int64) error {
 	return testutil.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, minUpdatedReplicas, desiredGeneration, poll, pollLongTimeout)
 }
+
+// Deployment should have a working scale subresource
+func testDeploymentSubresources(f *framework.Framework) {
+	ns := f.Namespace.Name
+	c := f.ClientSet
+
+	deploymentName := "test-new-deployment"
+	framework.Logf("Creating simple deployment %s", deploymentName)
+	d := e2edeployment.NewDeployment("test-new-deployment", int32(1), map[string]string{"name": WebserverImageName}, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
+	deploy, err := c.AppsV1().Deployments(ns).Create(context.TODO(), d, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
+
+	// Wait for it to be updated to revision 1
+	err = e2edeployment.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", WebserverImage)
+	framework.ExpectNoError(err)
+
+	err = e2edeployment.WaitForDeploymentComplete(c, deploy)
+	framework.ExpectNoError(err)
+
+	_, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+	framework.ExpectNoError(err)
+
+	ginkgo.By("getting scale subresource")
+	scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
+	if err != nil {
+		framework.Failf("Failed to get scale subresource: %v", err)
+	}
+	framework.ExpectEqual(scale.Spec.Replicas, int32(1))
+	framework.ExpectEqual(scale.Status.Replicas, int32(1))
+
+	ginkgo.By("updating a scale subresource")
+	scale.ResourceVersion = "" // indicate the scale update should be unconditional
+	scale.Spec.Replicas = 2
+	scaleResult, err := c.AppsV1().Deployments(ns).UpdateScale(context.TODO(), deploymentName, scale, metav1.UpdateOptions{})
+	if err != nil {
+		framework.Failf("Failed to put scale subresource: %v", err)
+	}
+	framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
+
+	ginkgo.By("verifying the deployment Spec.Replicas was modified")
+	deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+	if err != nil {
+		framework.Failf("Failed to get deployment resource: %v", err)
+	}
+	framework.ExpectEqual(*(deployment.Spec.Replicas), int32(2))
+
+	ginkgo.By("Patch a scale subresource")
+	scale.ResourceVersion = "" // indicate the scale update should be unconditional
+	scale.Spec.Replicas = 4 // should be 2 after "UpdateScale" operation, now Patch to 4
+	deploymentScalePatchPayload, err := json.Marshal(autoscalingv1.Scale{
+		Spec: autoscalingv1.ScaleSpec{
+			Replicas: scale.Spec.Replicas,
+		},
+	})
+	framework.ExpectNoError(err, "Could not Marshal JSON for patch payload")
+
+	_, err = c.AppsV1().Deployments(ns).Patch(context.TODO(), deploymentName, types.StrategicMergePatchType, []byte(deploymentScalePatchPayload), metav1.PatchOptions{}, "scale")
+	framework.ExpectNoError(err, "Failed to patch deployment: %v", err)
+
+	deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
+	framework.ExpectNoError(err, "Failed to get deployment resource: %v", err)
+	framework.ExpectEqual(*(deployment.Spec.Replicas), int32(4), "deployment should have 4 replicas")
+}
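For reference, the patch body the test sends is just an autoscaling/v1 Scale with Spec.Replicas set, marshalled to JSON. A tiny standalone sketch (not part of the commit; the file and program here are hypothetical) that prints that payload:

// print_scale_patch.go - illustrative sketch, not part of this commit.
package main

import (
	"encoding/json"
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
)

func main() {
	// Build the same payload the test marshals before calling Patch(..., "scale"):
	// an autoscaling/v1 Scale whose only meaningful field is spec.replicas.
	payload, err := json.Marshal(autoscalingv1.Scale{
		Spec: autoscalingv1.ScaleSpec{Replicas: 4},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload))
}

Because the request is a strategic merge patch against the scale subresource, only spec.replicas in that payload changes the stored Deployment, which is why the test then re-reads the Deployment and expects 4 replicas.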