Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #99314 from soltysh/cronjob_e2e_api
cronjob: Add CRUD tests for CronJob API verbs
This commit is contained in:
commit 6cacfe9e3a
@@ -18,6 +18,7 @@ package apps

import (
	"context"
	"encoding/json"
	"fmt"
	"time"
@@ -29,9 +30,14 @@ import (

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/util/retry"
	batchinternal "k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/controller/job"
	"k8s.io/kubernetes/test/e2e/framework"
@@ -280,6 +286,149 @@ var _ = SIGDescribe("CronJob", func() {

		ensureHistoryLimits(f.ClientSet, f.Namespace.Name, cronJob)
	})
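	// The new test below exercises every CronJob API verb in sequence:
	// create, get, list, watch, patch, update, the /status subresource,
	// and deleteCollection, all against the batch/v1beta1 endpoint.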
	ginkgo.It("should support CronJob API operations", func() {
		ginkgo.By("Creating a cronjob")
		successLimit := int32(1)
		failedLimit := int32(0)
		cjTemplate := newTestCronJob("test-api", "* */1 * * ?", batchv1beta1.AllowConcurrent,
			successCommand, &successLimit, &failedLimit)
		cjTemplate.Labels = map[string]string{
			"special-label": f.UniqueName,
		}

		ns := f.Namespace.Name
		cjVersion := "v1beta1"
		cjClient := f.ClientSet.BatchV1beta1().CronJobs(ns)
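		// Create returns the object as persisted by the server, including
		// populated fields such as the UID that later steps compare against.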
		ginkgo.By("creating")
		createdCronJob, err := cjClient.Create(context.TODO(), cjTemplate, metav1.CreateOptions{})
		framework.ExpectNoError(err)

		ginkgo.By("getting")
		gottenCronJob, err := cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(gottenCronJob.UID, createdCronJob.UID)
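		// The "special-label" selector narrows List to this test's object,
		// and the returned resourceVersion seeds the watch that follows.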
		ginkgo.By("listing")
		cjs, err := cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		framework.ExpectEqual(len(cjs.Items), 1, "filtered list should have 1 item")

		ginkgo.By("watching")
		framework.Logf("starting watch")
		cjWatch, err := cjClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
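		// Starting from the list's resourceVersion means the watch only
		// delivers events for changes made after the List above.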
		// Test cluster-wide list and watch
		clusterCJClient := f.ClientSet.BatchV1beta1().CronJobs("")
		ginkgo.By("cluster-wide listing")
		clusterCJs, err := clusterCJClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		framework.ExpectEqual(len(clusterCJs.Items), 1, "filtered list should have 1 item")

		ginkgo.By("cluster-wide watching")
		framework.Logf("starting watch")
		_, err = clusterCJClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: cjs.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
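		// types.MergePatchType is a JSON merge patch (RFC 7386); the server
		// merges the annotation into the stored object and returns the result.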
		ginkgo.By("patching")
		patchedCronJob, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType,
			[]byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
		framework.ExpectNoError(err)
		framework.ExpectEqual(patchedCronJob.Annotations["patched"], "true", "patched object should have the applied annotation")
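		// Update is guarded by optimistic concurrency: RetryOnConflict
		// re-fetches the object and retries if another writer bumped its
		// resourceVersion in the meantime.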
		ginkgo.By("updating")
		var cjToUpdate, updatedCronJob *batchv1beta1.CronJob
		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
			cjToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
			cjToUpdate.Annotations["updated"] = "true"
			updatedCronJob, err = cjClient.Update(context.TODO(), cjToUpdate, metav1.UpdateOptions{})
			return err
		})
		framework.ExpectNoError(err)
		framework.ExpectEqual(updatedCronJob.Annotations["updated"], "true", "updated object should have the applied annotation")
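		// Drain the watch opened earlier until the patched annotation is
		// observed, failing the test if no event arrives before the timeout.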
		framework.Logf("waiting for watch events with expected annotations")
		for sawAnnotations := false; !sawAnnotations; {
			select {
			case evt, ok := <-cjWatch.ResultChan():
				framework.ExpectEqual(ok, true, "watch channel should not close")
				framework.ExpectEqual(evt.Type, watch.Modified)
				watchedCronJob, isCronJob := evt.Object.(*batchv1beta1.CronJob)
				framework.ExpectEqual(isCronJob, true, fmt.Sprintf("expected CronJob, got %T", evt.Object))
				if watchedCronJob.Annotations["patched"] == "true" {
					framework.Logf("saw patched and updated annotations")
					sawAnnotations = true
					cjWatch.Stop()
				} else {
					framework.Logf("missing expected annotations, waiting: %#v", watchedCronJob.Annotations)
				}
			case <-time.After(wait.ForeverTestTimeout):
				framework.Fail("timed out waiting for watch event")
			}
		}
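		// The trailing "status" argument on Patch (and on the dynamic Get
		// below) routes the request to the object's /status subresource.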
		// /status subresource operations
		ginkgo.By("patching /status")
		// we need to use RFC3339 version since conversion over the wire cuts nanoseconds
		now1 := metav1.Now().Rfc3339Copy()
		cjStatus := batchv1beta1.CronJobStatus{
			LastScheduleTime: &now1,
		}
		cjStatusJSON, err := json.Marshal(cjStatus)
		framework.ExpectNoError(err)
		patchedStatus, err := cjClient.Patch(context.TODO(), createdCronJob.Name, types.MergePatchType,
			[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":`+string(cjStatusJSON)+`}`),
			metav1.PatchOptions{}, "status")
		framework.ExpectNoError(err)
		framework.ExpectEqual(patchedStatus.Status.LastScheduleTime.Equal(&now1), true, "patched object should have the applied lastScheduleTime status")
		framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation")
		ginkgo.By("updating /status")
		// we need to use RFC3339 version since conversion over the wire cuts nanoseconds
		now2 := metav1.Now().Rfc3339Copy()
		var statusToUpdate, updatedStatus *batchv1beta1.CronJob
		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
			statusToUpdate, err = cjClient.Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{})
			if err != nil {
				return err
			}
			statusToUpdate.Status.LastScheduleTime = &now2
			updatedStatus, err = cjClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
			return err
		})
		framework.ExpectNoError(err)
		framework.ExpectEqual(updatedStatus.Status.LastScheduleTime.Equal(&now2), true, fmt.Sprintf("updated object status expected to have updated lastScheduleTime %#v, got %#v", statusToUpdate.Status.LastScheduleTime, updatedStatus.Status.LastScheduleTime))
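		// The typed client has no getter for /status, so the test reads the
		// subresource back through the dynamic client as unstructured data.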
		ginkgo.By("get /status")
		cjResource := schema.GroupVersionResource{Group: "batch", Version: cjVersion, Resource: "cronjobs"}
		gottenStatus, err := f.DynamicClient.Resource(cjResource).Namespace(ns).Get(context.TODO(), createdCronJob.Name, metav1.GetOptions{}, "status")
		framework.ExpectNoError(err)
		statusUID, _, err := unstructured.NestedFieldCopy(gottenStatus.Object, "metadata", "uid")
		framework.ExpectNoError(err)
		framework.ExpectEqual(string(createdCronJob.UID), statusUID, fmt.Sprintf("createdCronJob.UID: %v expected to match statusUID: %v", createdCronJob.UID, statusUID))
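		// DeleteCollection may return before every object is gone: CronJobs
		// with finalizers linger with a deletionTimestamp set, which the
		// checks below tolerate.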
		// CronJob resource delete operations
		ginkgo.By("deleting a collection")
		expectFinalizer := func(cj *batchv1beta1.CronJob, msg string) {
			framework.ExpectNotEqual(cj.DeletionTimestamp, nil, fmt.Sprintf("expected deletionTimestamp, got nil on step: %q, cronjob: %+v", msg, cj))
			framework.ExpectEqual(len(cj.Finalizers) > 0, true, fmt.Sprintf("expected finalizers on cronjob, got none on step: %q, cronjob: %+v", msg, cj))
		}
		err = cjClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		cjs, err = cjClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
		framework.ExpectNoError(err)
		// Should have <= 1 items since some cronjobs might not have been deleted yet due to finalizers
		framework.ExpectEqual(len(cjs.Items) <= 1, true, "filtered list should be <= 1")
		// Validate finalizers
		for _, cj := range cjs.Items {
			expectFinalizer(&cj, "deleting cronjob collection")
		}
	})
})

func ensureHistoryLimits(c clientset.Interface, ns string, cronJob *batchv1beta1.CronJob) {