Create e2e Replicaset status lifecycle test
e2e test validates the following 3 extra endpoints:
- replaceAppsV1NamespacedReplicaSetStatus
- readAppsV1NamespacedReplicaSetStatus
- patchAppsV1NamespacedReplicaSetStatus
parent 7705b300e2
commit 31f30383d8
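The three endpoints above are the read, replace, and patch operations on the ReplicaSet "status" subresource. As a point of reference, here is a minimal sketch (not part of the commit) of how those operations map to client-go calls outside the e2e framework; the namespace "default", the ReplicaSet name "my-rs", and the kubeconfig location are illustrative assumptions, and the typed client reads status via a plain Get rather than a dedicated status getter.

// Minimal sketch, not part of the commit: exercising the ReplicaSet /status
// subresource with client-go. Namespace "default", name "my-rs", and the
// kubeconfig location are illustrative assumptions.
package main

import (
    "context"
    "fmt"

    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
    if err != nil {
        panic(err)
    }
    rsClient := kubernetes.NewForConfigOrDie(config).AppsV1().ReplicaSets("default")

    // readAppsV1NamespacedReplicaSetStatus: the typed client has no dedicated
    // status getter, so read the object and inspect .Status (the e2e test below
    // instead GETs the "status" subresource through the dynamic client).
    rs, err := rsClient.Get(context.TODO(), "my-rs", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    fmt.Printf("conditions: %+v\n", rs.Status.Conditions)

    // replaceAppsV1NamespacedReplicaSetStatus: PUT via UpdateStatus.
    rs.Status.Conditions = append(rs.Status.Conditions, appsv1.ReplicaSetCondition{
        Type:   "StatusUpdate",
        Status: "True",
        Reason: "Example",
    })
    if _, err := rsClient.UpdateStatus(context.TODO(), rs, metav1.UpdateOptions{}); err != nil {
        panic(err)
    }

    // patchAppsV1NamespacedReplicaSetStatus: merge-patch the "status" subresource.
    payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`)
    if _, err := rsClient.Patch(context.TODO(), "my-rs", types.MergePatchType, payload, metav1.PatchOptions{}, "status"); err != nil {
        panic(err)
    }
}

The e2e test in the diff below additionally watches the ReplicaSet with watchtools.Until to confirm that the written and patched conditions are actually observed.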
@@ -20,6 +20,7 @@ import (
    "context"
    "encoding/json"
    "fmt"
    "strings"
    "time"

    appsv1 "k8s.io/api/apps/v1"
@@ -28,12 +29,14 @@ import (
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/tools/cache"
    watchtools "k8s.io/client-go/tools/watch"
    "k8s.io/client-go/util/retry"

    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/rand"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/apimachinery/pkg/util/wait"
@@ -47,6 +50,10 @@ import (
    imageutils "k8s.io/kubernetes/test/utils/image"
)

const (
    rsRetryTimeout = 2 * time.Minute
)

func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string, args []string) *appsv1.ReplicaSet {
    zero := int64(0)
    return &appsv1.ReplicaSet{
@@ -151,6 +158,9 @@ var _ = SIGDescribe("ReplicaSet", func() {

    })

    ginkgo.It("should validate Replicaset Status endpoints", func() {
        testRSStatus(f)
    })
})

// A basic test to check the deployment of an image using a ReplicaSet. The
@@ -576,3 +586,135 @@ func listRSDeleteCollection(f *framework.Framework) {
    framework.ExpectNoError(err, "failed to list ReplicaSets")
    framework.ExpectEqual(len(rsList.Items), 0, "filtered list should have no replicas")
}

func testRSStatus(f *framework.Framework) {
    ns := f.Namespace.Name
    c := f.ClientSet
    rsClient := c.AppsV1().ReplicaSets(ns)

    // Define ReplicaSet Labels
    rsPodLabels := map[string]string{
        "name": "sample-pod",
        "pod":  WebserverImageName,
    }
    labelSelector := labels.SelectorFromSet(rsPodLabels).String()

    rsName := "test-rs"
    replicas := int32(1)

    w := &cache.ListWatch{
        WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
            options.LabelSelector = labelSelector
            return rsClient.Watch(context.TODO(), options)
        },
    }
    rsList, err := c.AppsV1().ReplicaSets("").List(context.TODO(), metav1.ListOptions{LabelSelector: labelSelector})
    framework.ExpectNoError(err, "failed to list Replicasets")

    ginkgo.By("Create a Replicaset")
    rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
    testReplicaSet, err := c.AppsV1().ReplicaSets(ns).Create(context.TODO(), rs, metav1.CreateOptions{})
    framework.ExpectNoError(err)

    ginkgo.By("Verify that the required pods have come up.")
    err = e2epod.VerifyPodsRunning(c, ns, "sample-pod", false, replicas)
    framework.ExpectNoError(err, "Failed to create pods: %s", err)

    ginkgo.By("Getting /status")
    rsResource := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}
    rsStatusUnstructured, err := f.DynamicClient.Resource(rsResource).Namespace(ns).Get(context.TODO(), rsName, metav1.GetOptions{}, "status")
    framework.ExpectNoError(err, "Failed to fetch the status of replicaset %s in namespace %s", rsName, ns)
    rsStatusBytes, err := json.Marshal(rsStatusUnstructured)
    framework.ExpectNoError(err, "Failed to marshal unstructured response. %v", err)

    var rsStatus appsv1.ReplicaSet
    err = json.Unmarshal(rsStatusBytes, &rsStatus)
    framework.ExpectNoError(err, "Failed to unmarshal JSON bytes to a replicaset object type")
    framework.Logf("Replicaset %s has Conditions: %v", rsName, rsStatus.Status.Conditions)

    ginkgo.By("updating the Replicaset Status")
    var statusToUpdate, updatedStatus *appsv1.ReplicaSet

    err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
        statusToUpdate, err = rsClient.Get(context.TODO(), rsName, metav1.GetOptions{})
        framework.ExpectNoError(err, "Unable to retrieve replicaset %s", rsName)

        statusToUpdate.Status.Conditions = append(statusToUpdate.Status.Conditions, appsv1.ReplicaSetCondition{
            Type:    "StatusUpdate",
            Status:  "True",
            Reason:  "E2E",
            Message: "Set from e2e test",
        })

        updatedStatus, err = rsClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
        return err
    })
    framework.ExpectNoError(err, "Failed to update status. %v", err)
    framework.Logf("updatedStatus.Conditions: %#v", updatedStatus.Status.Conditions)

ginkgo.By("watching for the ReplicaSet status to be updated")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), rsRetryTimeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if rs, ok := event.Object.(*appsv1.ReplicaSet); ok {
|
||||
found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name &&
|
||||
rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace &&
|
||||
rs.ObjectMeta.Labels["name"] == testReplicaSet.ObjectMeta.Labels["name"] &&
|
||||
rs.ObjectMeta.Labels["pod"] == testReplicaSet.ObjectMeta.Labels["pod"]
|
||||
if !found {
|
||||
framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, rs.Status.Conditions)
|
||||
return false, nil
|
||||
}
|
||||
for _, cond := range rs.Status.Conditions {
|
||||
if cond.Type == "StatusUpdate" &&
|
||||
cond.Reason == "E2E" &&
|
||||
cond.Message == "Set from e2e test" {
|
||||
framework.Logf("Found replicaset %v in namespace %v with labels: %v annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.ObjectMeta.Labels, rs.Annotations, rs.Status.Conditions)
|
||||
return found, nil
|
||||
}
|
||||
framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, rs.Status.Conditions)
|
||||
}
|
||||
}
|
||||
object := strings.Split(fmt.Sprintf("%v", event.Object), "{")[0]
|
||||
framework.Logf("Observed %v event: %+v", object, event.Type)
|
||||
return false, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to locate replicaset %v in namespace %v", testReplicaSet.ObjectMeta.Name, ns)
|
||||
framework.Logf("Replicaset %s has an updated status", rsName)
|
||||
|
||||
ginkgo.By("patching the Replicaset Status")
|
||||
payload := []byte(`{"status":{"conditions":[{"type":"StatusPatched","status":"True"}]}}`)
|
||||
framework.Logf("Patch payload: %v", string(payload))
|
||||
|
||||
patchedReplicaSet, err := rsClient.Patch(context.TODO(), rsName, types.MergePatchType, payload, metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err, "Failed to patch status. %v", err)
|
||||
framework.Logf("Patched status conditions: %#v", patchedReplicaSet.Status.Conditions)
|
||||
|
||||
ginkgo.By("watching for the Replicaset status to be patched")
|
||||
ctx, cancel = context.WithTimeout(context.Background(), rsRetryTimeout)
|
||||
defer cancel()
|
||||
_, err = watchtools.Until(ctx, rsList.ResourceVersion, w, func(event watch.Event) (bool, error) {
|
||||
if rs, ok := event.Object.(*appsv1.ReplicaSet); ok {
|
||||
found := rs.ObjectMeta.Name == testReplicaSet.ObjectMeta.Name &&
|
||||
rs.ObjectMeta.Namespace == testReplicaSet.ObjectMeta.Namespace &&
|
||||
rs.ObjectMeta.Labels["name"] == testReplicaSet.ObjectMeta.Labels["name"] &&
|
||||
rs.ObjectMeta.Labels["pod"] == testReplicaSet.ObjectMeta.Labels["pod"]
|
||||
if !found {
|
||||
framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, rs.Status.Conditions)
|
||||
return false, nil
|
||||
}
|
||||
for _, cond := range rs.Status.Conditions {
|
||||
if cond.Type == "StatusPatched" {
|
||||
framework.Logf("Found replicaset %v in namespace %v with labels: %v annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.ObjectMeta.Labels, rs.Annotations, cond)
|
||||
return found, nil
|
||||
}
|
||||
framework.Logf("Observed replicaset %v in namespace %v with annotations: %v & Conditions: %v", rs.ObjectMeta.Name, rs.ObjectMeta.Namespace, rs.Annotations, cond)
|
||||
}
|
||||
}
|
||||
object := strings.Split(fmt.Sprintf("%v", event.Object), "{")[0]
|
||||
framework.Logf("Observed %v event: %+v", object, event.Type)
|
||||
return false, nil
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to locate replicaset %v in namespace %v", testReplicaSet.ObjectMeta.Name, ns)
|
||||
framework.Logf("Replicaset %s has a patched status", rsName)
|
||||
}
|
||||
|