apiserver: add conformance tests for flowcontrol API

parent 74fefd877f
commit cac26737cd

@@ -28,16 +28,22 @@ import (
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"

	flowcontrol "k8s.io/api/flowcontrol/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilrand "k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/apiserver/pkg/util/apihelpers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	clientsideflowcontrol "k8s.io/client-go/util/flowcontrol"
	"k8s.io/client-go/util/retry"
	"k8s.io/kubernetes/test/e2e/framework"
	admissionapi "k8s.io/pod-security-admission/api"
	"k8s.io/utils/ptr"

@@ -247,6 +253,472 @@ var _ = SIGDescribe("API priority and fairness", func() {
			}
		}
	})

	/*
		Release: v1.29
		Testname: FlowSchema API
		Description:
		The flowcontrol.apiserver.k8s.io API group MUST exist in the /apis discovery document.
		The flowcontrol.apiserver.k8s.io/v1 API group/version MUST exist
		in the /apis/flowcontrol.apiserver.k8s.io discovery document.
		The flowschemas and flowschemas/status resources MUST exist
		in the /apis/flowcontrol.apiserver.k8s.io/v1 discovery document.
		The flowschema resource must support create, get, list, watch,
		update, patch, delete, and deletecollection.
	*/
ginkgo.It("should support Priority and Fairness FlowSchema API operations", func(ctx context.Context) {
|
||||
fsVersion := "v1"
|
||||
ginkgo.By("getting /apis")
|
||||
{
|
||||
discoveryGroups, err := f.ClientSet.Discovery().ServerGroups()
|
||||
framework.ExpectNoError(err)
|
||||
found := false
|
||||
for _, group := range discoveryGroups.Groups {
|
||||
if group.Name == flowcontrol.GroupName {
|
||||
for _, version := range group.Versions {
|
||||
if version.Version == fsVersion {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
framework.Failf("expected flowcontrol API group/version, got %#v", discoveryGroups.Groups)
|
||||
}
|
||||
}
|
||||
|
||||
ginkgo.By("getting /apis/flowcontrol.apiserver.k8s.io")
|
||||
{
|
||||
group := &metav1.APIGroup{}
|
||||
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/flowcontrol.apiserver.k8s.io").Do(ctx).Into(group)
|
||||
framework.ExpectNoError(err)
|
||||
found := false
|
||||
for _, version := range group.Versions {
|
||||
if version.Version == fsVersion {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
framework.Failf("expected flowschemas API version, got %#v", group.Versions)
|
||||
}
|
||||
}
|
||||
|
||||
ginkgo.By("getting /apis/flowcontrol.apiserver.k8s.io/" + fsVersion)
|
||||
{
|
||||
resources, err := f.ClientSet.Discovery().ServerResourcesForGroupVersion(flowcontrol.SchemeGroupVersion.String())
|
||||
framework.ExpectNoError(err)
|
||||
foundFS, foundFSStatus := false, false
|
||||
for _, resource := range resources.APIResources {
|
||||
switch resource.Name {
|
||||
case "flowschemas":
|
||||
foundFS = true
|
||||
case "flowschemas/status":
|
||||
foundFSStatus = true
|
||||
}
|
||||
}
|
||||
if !foundFS {
|
||||
framework.Failf("expected flowschemas, got %#v", resources.APIResources)
|
||||
}
|
||||
if !foundFSStatus {
|
||||
framework.Failf("expected flowschemas/status, got %#v", resources.APIResources)
|
||||
}
|
||||
}
|
||||
|
||||
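
		// The rest of the test drives the typed FlowSchema client. A randomized
		// label value scopes every list, watch, and deletecollection call to the
		// objects created by this invocation, so concurrent runs are unlikely
		// to collide.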
		client := f.ClientSet.FlowcontrolV1().FlowSchemas()
		labelKey, labelValue := "example-e2e-fs-label", utilrand.String(8)
		label := fmt.Sprintf("%s=%s", labelKey, labelValue)

		template := &flowcontrol.FlowSchema{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "e2e-example-fs-",
				Labels: map[string]string{
					labelKey: labelValue,
				},
			},
			Spec: flowcontrol.FlowSchemaSpec{
				MatchingPrecedence: 10000,
				PriorityLevelConfiguration: flowcontrol.PriorityLevelConfigurationReference{
					Name: "global-default",
				},
				DistinguisherMethod: &flowcontrol.FlowDistinguisherMethod{
					Type: flowcontrol.FlowDistinguisherMethodByUserType,
				},
				Rules: []flowcontrol.PolicyRulesWithSubjects{
					{
						Subjects: []flowcontrol.Subject{
							{
								Kind: flowcontrol.SubjectKindUser,
								User: &flowcontrol.UserSubject{
									Name: "example-e2e-non-existent-user",
								},
							},
						},
						NonResourceRules: []flowcontrol.NonResourcePolicyRule{
							{
								Verbs:           []string{flowcontrol.VerbAll},
								NonResourceURLs: []string{flowcontrol.NonResourceAll},
							},
						},
					},
				},
			},
		}

		ginkgo.DeferCleanup(func(ctx context.Context) {
			err := client.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: label})
			framework.ExpectNoError(err)
		})
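
		// Create is called three times with the same template; GenerateName
		// gives each FlowSchema a distinct server-assigned name, so the
		// label-filtered list below is expected to contain exactly three items.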
ginkgo.By("creating")
|
||||
_, err := client.Create(ctx, template, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
_, err = client.Create(ctx, template, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
fsCreated, err := client.Create(ctx, template, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("getting")
|
||||
fsRead, err := client.Get(ctx, fsCreated.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(fsRead.UID).To(gomega.Equal(fsCreated.UID))
|
||||
|
||||
ginkgo.By("listing")
|
||||
list, err := client.List(ctx, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(list.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
|
||||
|
||||
ginkgo.By("watching")
|
||||
framework.Logf("starting watch")
|
||||
fsWatch, err := client.Watch(ctx, metav1.ListOptions{ResourceVersion: list.ResourceVersion, LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
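
		// The watch starts from the ResourceVersion returned by the list, so it
		// only delivers changes made after this point; the patch and update
		// below are what it is expected to observe.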
ginkgo.By("patching")
|
||||
patchBytes := []byte(`{"metadata":{"annotations":{"patched":"true"}},"spec":{"matchingPrecedence":9999}}`)
|
||||
fsPatched, err := client.Patch(ctx, fsCreated.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(fsPatched.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
|
||||
gomega.Expect(fsPatched.Spec.MatchingPrecedence).To(gomega.Equal(int32(9999)), "patched object should have the applied spec")
|
||||
|
||||
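
		// retry.RetryOnConflict re-reads the object and reapplies the change
		// whenever the server rejects the Update with a 409 Conflict, the
		// standard read-modify-write pattern against a stale resourceVersion.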
ginkgo.By("updating")
|
||||
var fsUpdated *flowcontrol.FlowSchema
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
fs, err := client.Get(ctx, fsCreated.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
fsToUpdate := fs.DeepCopy()
|
||||
fsToUpdate.Annotations["updated"] = "true"
|
||||
fsToUpdate.Spec.MatchingPrecedence = int32(9000)
|
||||
|
||||
fsUpdated, err = client.Update(ctx, fsToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to update flowschema %q", fsCreated.Name)
|
||||
gomega.Expect(fsUpdated.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
|
||||
gomega.Expect(fsUpdated.Spec.MatchingPrecedence).To(gomega.Equal(int32(9000)), "updated object should have the applied spec")
|
||||
|
||||
framework.Logf("waiting for watch events with expected annotations")
|
||||
for sawAnnotation := false; !sawAnnotation; {
|
||||
select {
|
||||
case evt, ok := <-fsWatch.ResultChan():
|
||||
if !ok {
|
||||
framework.Fail("watch channel should not close")
|
||||
}
|
||||
gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
|
||||
fsWatched, isFS := evt.Object.(*flowcontrol.FlowSchema)
|
||||
if !isFS {
|
||||
framework.Failf("expected an object of type: %T, but got %T", &flowcontrol.FlowSchema{}, evt.Object)
|
||||
}
|
||||
if fsWatched.Annotations["patched"] == "true" {
|
||||
sawAnnotation = true
|
||||
fsWatch.Stop()
|
||||
} else {
|
||||
framework.Logf("missing expected annotations, waiting: %#v", fsWatched.Annotations)
|
||||
}
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
framework.Fail("timed out waiting for watch event")
|
||||
}
|
||||
}
|
||||
|
||||
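
		// The status subresource is read through the dynamic client, which can
		// address subresources directly; the typed client is used again below
		// for the status patch and UpdateStatus calls.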
ginkgo.By("getting /status")
|
||||
resource := flowcontrol.SchemeGroupVersion.WithResource("flowschemas")
|
||||
fsStatusRead, err := f.DynamicClient.Resource(resource).Get(ctx, fsCreated.Name, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(fsStatusRead.GetObjectKind().GroupVersionKind()).To(gomega.Equal(flowcontrol.SchemeGroupVersion.WithKind("FlowSchema")))
|
||||
gomega.Expect(fsStatusRead.GetUID()).To(gomega.Equal(fsCreated.UID))
|
||||
|
||||
ginkgo.By("patching /status")
|
||||
patchBytes = []byte(`{"status":{"conditions":[{"type":"PatchStatusFailed","status":"False","reason":"e2e"}]}}`)
|
||||
fsStatusPatched, err := client.Patch(ctx, fsCreated.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err)
|
||||
condition := apihelpers.GetFlowSchemaConditionByType(fsStatusPatched, flowcontrol.FlowSchemaConditionType("PatchStatusFailed"))
|
||||
gomega.Expect(condition).NotTo(gomega.BeNil())
|
||||
|
||||
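
		// UpdateStatus goes through the same conflict-retry pattern as the spec
		// update above; the appended condition is then looked up by type, and a
		// nil result would mean the status write did not round-trip.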
ginkgo.By("updating /status")
|
||||
var fsStatusUpdated *flowcontrol.FlowSchema
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
fs, err := client.Get(ctx, fsCreated.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
fsStatusToUpdate := fs.DeepCopy()
|
||||
fsStatusToUpdate.Status.Conditions = append(fsStatusToUpdate.Status.Conditions, flowcontrol.FlowSchemaCondition{
|
||||
Type: "StatusUpdateFailed",
|
||||
Status: flowcontrol.ConditionFalse,
|
||||
Reason: "E2E",
|
||||
Message: "Set from an e2e test",
|
||||
})
|
||||
fsStatusUpdated, err = client.UpdateStatus(ctx, fsStatusToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to update status of flowschema %q", fsCreated.Name)
|
||||
condition = apihelpers.GetFlowSchemaConditionByType(fsStatusUpdated, flowcontrol.FlowSchemaConditionType("StatusUpdateFailed"))
|
||||
gomega.Expect(condition).NotTo(gomega.BeNil())
|
||||
|
||||
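
		// Only fsCreated is deleted by name; the other two objects created from
		// the template remain, which the HaveLen(2) assertion checks before
		// DeleteCollection removes everything matching the label.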
ginkgo.By("deleting")
|
||||
err = client.Delete(ctx, fsCreated.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
_, err = client.Get(ctx, fsCreated.Name, metav1.GetOptions{})
|
||||
if !apierrors.IsNotFound(err) {
|
||||
framework.Failf("expected 404, got %#v", err)
|
||||
}
|
||||
|
||||
list, err = client.List(ctx, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(list.Items).To(gomega.HaveLen(2), "filtered list should have 2 items")
|
||||
|
||||
ginkgo.By("deleting a collection")
|
||||
err = client.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
list, err = client.List(ctx, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(list.Items).To(gomega.BeEmpty(), "filtered list should have 0 items")
|
||||
})
|
||||
|
||||
	/*
		Release: v1.29
		Testname: PriorityLevelConfiguration API
		Description:
		The flowcontrol.apiserver.k8s.io API group MUST exist in the /apis discovery document.
		The flowcontrol.apiserver.k8s.io/v1 API group/version MUST exist
		in the /apis/flowcontrol.apiserver.k8s.io discovery document.
		The prioritylevelconfigurations and prioritylevelconfigurations/status resources
		MUST exist in the /apis/flowcontrol.apiserver.k8s.io/v1 discovery document.
		The prioritylevelconfiguration resource must support create, get, list, watch,
		update, patch, delete, and deletecollection.
	*/
ginkgo.It("should support Priority and Fairness PriorityLevelConfiguration API operations", func(ctx context.Context) {
|
||||
plVersion := "v1"
|
||||
ginkgo.By("getting /apis")
|
||||
{
|
||||
discoveryGroups, err := f.ClientSet.Discovery().ServerGroups()
|
||||
framework.ExpectNoError(err)
|
||||
found := false
|
||||
for _, group := range discoveryGroups.Groups {
|
||||
if group.Name == flowcontrol.GroupName {
|
||||
for _, version := range group.Versions {
|
||||
if version.Version == plVersion {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
framework.Failf("expected flowcontrol API group/version, got %#v", discoveryGroups.Groups)
|
||||
}
|
||||
}
|
||||
|
||||
ginkgo.By("getting /apis/flowcontrol.apiserver.k8s.io")
|
||||
{
|
||||
group := &metav1.APIGroup{}
|
||||
err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/flowcontrol.apiserver.k8s.io").Do(ctx).Into(group)
|
||||
framework.ExpectNoError(err)
|
||||
found := false
|
||||
for _, version := range group.Versions {
|
||||
if version.Version == plVersion {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
framework.Failf("expected flowcontrol API version, got %#v", group.Versions)
|
||||
}
|
||||
}
|
||||
|
||||
ginkgo.By("getting /apis/flowcontrol.apiserver.k8s.io/" + plVersion)
|
||||
{
|
||||
resources, err := f.ClientSet.Discovery().ServerResourcesForGroupVersion(flowcontrol.SchemeGroupVersion.String())
|
||||
framework.ExpectNoError(err)
|
||||
foundPL, foundPLStatus := false, false
|
||||
for _, resource := range resources.APIResources {
|
||||
switch resource.Name {
|
||||
case "prioritylevelconfigurations":
|
||||
foundPL = true
|
||||
case "prioritylevelconfigurations/status":
|
||||
foundPLStatus = true
|
||||
}
|
||||
}
|
||||
if !foundPL {
|
||||
framework.Failf("expected prioritylevelconfigurations, got %#v", resources.APIResources)
|
||||
}
|
||||
if !foundPLStatus {
|
||||
framework.Failf("expected prioritylevelconfigurations/status, got %#v", resources.APIResources)
|
||||
}
|
||||
}
|
||||
|
||||
		client := f.ClientSet.FlowcontrolV1().PriorityLevelConfigurations()
		labelKey, labelValue := "example-e2e-pl-label", utilrand.String(8)
		label := fmt.Sprintf("%s=%s", labelKey, labelValue)
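
		// A minimal Limited priority level: Type Limited requires the Limited
		// field to be populated, and LimitResponseTypeReject means requests over
		// the level's capacity are rejected rather than queued.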
template := &flowcontrol.PriorityLevelConfiguration{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
GenerateName: "e2e-example-pl-",
|
||||
Labels: map[string]string{
|
||||
labelKey: labelValue,
|
||||
},
|
||||
},
|
||||
Spec: flowcontrol.PriorityLevelConfigurationSpec{
|
||||
Type: flowcontrol.PriorityLevelEnablementLimited,
|
||||
Limited: &flowcontrol.LimitedPriorityLevelConfiguration{
|
||||
NominalConcurrencyShares: ptr.To(int32(2)),
|
||||
LimitResponse: flowcontrol.LimitResponse{
|
||||
Type: flowcontrol.LimitResponseTypeReject,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.DeferCleanup(func(ctx context.Context) {
|
||||
err := client.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
ginkgo.By("creating")
|
||||
_, err := client.Create(ctx, template, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
_, err = client.Create(ctx, template, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
plCreated, err := client.Create(ctx, template, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
ginkgo.By("getting")
|
||||
plRead, err := client.Get(ctx, plCreated.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(plRead.UID).To(gomega.Equal(plCreated.UID))
|
||||
|
||||
ginkgo.By("listing")
|
||||
list, err := client.List(ctx, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(list.Items).To(gomega.HaveLen(3), "filtered list should have 3 items")
|
||||
|
||||
ginkgo.By("watching")
|
||||
framework.Logf("starting watch")
|
||||
plWatch, err := client.Watch(ctx, metav1.ListOptions{ResourceVersion: list.ResourceVersion, LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
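
		// NominalConcurrencyShares is a *int32, so the assertions below compare
		// against ptr.To(int32(...)); gomega.Equal is based on reflect.DeepEqual,
		// which follows pointers and compares the pointed-to values.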
ginkgo.By("patching")
|
||||
patchBytes := []byte(`{"metadata":{"annotations":{"patched":"true"}},"spec":{"limited":{"nominalConcurrencyShares":4}}}`)
|
||||
plPatched, err := client.Patch(ctx, plCreated.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(plPatched.Annotations).To(gomega.HaveKeyWithValue("patched", "true"), "patched object should have the applied annotation")
|
||||
gomega.Expect(plPatched.Spec.Limited.NominalConcurrencyShares).To(gomega.Equal(ptr.To(int32(4))), "patched object should have the applied spec")
|
||||
|
||||
ginkgo.By("updating")
|
||||
var plUpdated *flowcontrol.PriorityLevelConfiguration
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
pl, err := client.Get(ctx, plCreated.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
plToUpdate := pl.DeepCopy()
|
||||
plToUpdate.Annotations["updated"] = "true"
|
||||
plToUpdate.Spec.Limited.NominalConcurrencyShares = ptr.To(int32(6))
|
||||
|
||||
plUpdated, err = client.Update(ctx, plToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to update prioritylevelconfiguration %q", plCreated.Name)
|
||||
gomega.Expect(plUpdated.Annotations).To(gomega.HaveKeyWithValue("updated", "true"), "updated object should have the applied annotation")
|
||||
gomega.Expect(plUpdated.Spec.Limited.NominalConcurrencyShares).To(gomega.Equal(ptr.To(int32(6))), "updated object should have the applied spec")
|
||||
|
||||
framework.Logf("waiting for watch events with expected annotations")
|
||||
for sawAnnotation := false; !sawAnnotation; {
|
||||
select {
|
||||
case evt, ok := <-plWatch.ResultChan():
|
||||
if !ok {
|
||||
framework.Fail("watch channel should not close")
|
||||
}
|
||||
gomega.Expect(evt.Type).To(gomega.Equal(watch.Modified))
|
||||
plWatched, isPL := evt.Object.(*flowcontrol.PriorityLevelConfiguration)
|
||||
if !isPL {
|
||||
framework.Failf("expected an object of type: %T, but got %T", &flowcontrol.PriorityLevelConfiguration{}, evt.Object)
|
||||
}
|
||||
if plWatched.Annotations["patched"] == "true" {
|
||||
sawAnnotation = true
|
||||
plWatch.Stop()
|
||||
} else {
|
||||
framework.Logf("missing expected annotations, waiting: %#v", plWatched.Annotations)
|
||||
}
|
||||
case <-time.After(wait.ForeverTestTimeout):
|
||||
framework.Fail("timed out waiting for watch event")
|
||||
}
|
||||
}
|
||||
|
||||
ginkgo.By("getting /status")
|
||||
resource := flowcontrol.SchemeGroupVersion.WithResource("prioritylevelconfigurations")
|
||||
plStatusRead, err := f.DynamicClient.Resource(resource).Get(ctx, plCreated.Name, metav1.GetOptions{}, "status")
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(plStatusRead.GetObjectKind().GroupVersionKind()).To(gomega.Equal(flowcontrol.SchemeGroupVersion.WithKind("PriorityLevelConfiguration")))
|
||||
gomega.Expect(plStatusRead.GetUID()).To(gomega.Equal(plCreated.UID))
|
||||
|
||||
ginkgo.By("patching /status")
|
||||
patchBytes = []byte(`{"status":{"conditions":[{"type":"PatchStatusFailed","status":"False","reason":"e2e"}]}}`)
|
||||
plStatusPatched, err := client.Patch(ctx, plCreated.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status")
|
||||
framework.ExpectNoError(err)
|
||||
condition := apihelpers.GetPriorityLevelConfigurationConditionByType(plStatusPatched, flowcontrol.PriorityLevelConfigurationConditionType("PatchStatusFailed"))
|
||||
gomega.Expect(condition).NotTo(gomega.BeNil())
|
||||
|
||||
ginkgo.By("updating /status")
|
||||
var plStatusUpdated *flowcontrol.PriorityLevelConfiguration
|
||||
err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
|
||||
pl, err := client.Get(ctx, plCreated.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
plStatusToUpdate := pl.DeepCopy()
|
||||
plStatusToUpdate.Status.Conditions = append(plStatusToUpdate.Status.Conditions, flowcontrol.PriorityLevelConfigurationCondition{
|
||||
Type: "StatusUpdateFailed",
|
||||
Status: flowcontrol.ConditionFalse,
|
||||
Reason: "E2E",
|
||||
Message: "Set from an e2e test",
|
||||
})
|
||||
plStatusUpdated, err = client.UpdateStatus(ctx, plStatusToUpdate, metav1.UpdateOptions{})
|
||||
return err
|
||||
})
|
||||
framework.ExpectNoError(err, "failed to update status of prioritylevelconfiguration %q", plCreated.Name)
|
||||
condition = apihelpers.GetPriorityLevelConfigurationConditionByType(plStatusUpdated, flowcontrol.PriorityLevelConfigurationConditionType("StatusUpdateFailed"))
|
||||
gomega.Expect(condition).NotTo(gomega.BeNil())
|
||||
|
||||
ginkgo.By("deleting")
|
||||
err = client.Delete(ctx, plCreated.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
_, err = client.Get(ctx, plCreated.Name, metav1.GetOptions{})
|
||||
if !apierrors.IsNotFound(err) {
|
||||
framework.Failf("expected 404, got %#v", err)
|
||||
}
|
||||
|
||||
list, err = client.List(ctx, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(list.Items).To(gomega.HaveLen(2), "filtered list should have 2 items")
|
||||
|
||||
ginkgo.By("deleting a collection")
|
||||
err = client.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
list, err = client.List(ctx, metav1.ListOptions{LabelSelector: label})
|
||||
framework.ExpectNoError(err)
|
||||
gomega.Expect(list.Items).To(gomega.BeEmpty(), "filtered list should have 0 items")
|
||||
})
|
||||
})
|
||||
|
||||
// createPriorityLevel creates a priority level with the provided assured