Mirror of https://github.com/k3s-io/kubernetes.git, synced 2026-02-22 07:03:28 +00:00
Merge pull request #131843 from pohly/resourcequota-scope-flake
ResourceQuota: partial fix "should verify ResourceQuota with terminating scopes through scope selectors" flake
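The core of the fix is swapping waitForResourceQuota (Gomega Eventually: pass as soon as the expected usage shows up) for a new ensureResourceQuota (Gomega Consistently: the usage must also keep holding) at points where the quota was already expected to have settled. A minimal, hypothetical sketch of that Eventually-vs-Consistently distinction in plain Gomega, not the e2e framework wrappers used in the diff (readUsedPods and the timeouts are illustrative):

package example

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestQuotaUsageStaysStable(t *testing.T) {
	g := gomega.NewWithT(t)

	// Placeholder for "read .status.used[pods] from the ResourceQuota".
	readUsedPods := func() int { return 1 }

	// waitForResourceQuota style: pass once the expected usage has shown up.
	g.Eventually(readUsedPods).WithTimeout(30 * time.Second).Should(gomega.Equal(1))

	// ensureResourceQuota style: the usage must also keep that value for the
	// whole window, so a value that briefly flips back (the reported flake)
	// fails here instead of later at pod admission.
	g.Consistently(readUsedPods).WithTimeout(10 * time.Second).Should(gomega.Equal(1))
}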
@@ -52,6 +52,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/utils/crd"
+	"k8s.io/kubernetes/test/utils/format"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
 	"k8s.io/utils/pointer"
@@ -60,6 +61,7 @@ import (
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
 	"github.com/onsi/gomega/gcustom"
+	gomegatypes "github.com/onsi/gomega/types"
 )

 const (
@@ -796,7 +798,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
 		usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
 		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
-		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
+		err = ensureResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
 		framework.ExpectNoError(err)

 		ginkgo.By("Deleting the pod")
@@ -837,6 +839,8 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
 		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
 		framework.ExpectNoError(err)
+		err = ensureResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
+		framework.ExpectNoError(err)

 		ginkgo.By("Deleting the pod")
 		err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, podName, *metav1.NewDeleteOptions(0))
@@ -1558,6 +1562,12 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
 		err = waitForResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
 		framework.ExpectNoError(err)
+		// It already was like this before, so typically waitForResourceQuota returns immediately.
+		// Keep checking for a while to detect unexpected changes. There has been a test flake
+		// where updating the pod failed with:
+		//    pods "test-pod" is forbidden: exceeded quota: quota-terminating, requested: pods=1, used: pods=1, limited: pods=1
+		err = ensureResourceQuota(ctx, f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
+		framework.ExpectNoError(err)

 		ginkgo.By("Updating the pod to have an active deadline")
 		err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
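The added comments quote the quota-terminating object from the flake message, and the test title mentions scope selectors. For orientation, a rough sketch of the kind of quota such a test creates (illustrative name and limit, standard core/v1 types; not the test's literal helper): a ResourceQuota that counts only Terminating-scope pods, i.e. pods with spec.activeDeadlineSeconds set, which is why the test above updates the pod to have an active deadline.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// terminatingQuota builds a quota that limits Terminating-scope pods to one,
// selecting the scope through a scope selector rather than the Scopes list.
func terminatingQuota(name string) *v1.ResourceQuota {
	return &v1.ResourceQuota{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ResourceQuotaSpec{
			Hard: v1.ResourceList{
				v1.ResourcePods: resource.MustParse("1"),
			},
			ScopeSelector: &v1.ScopeSelector{
				MatchExpressions: []v1.ScopedResourceSelectorRequirement{{
					ScopeName: v1.ResourceQuotaScopeTerminating,
					Operator:  v1.ScopeSelectorOpExists,
				}},
			},
		},
	}
}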
@@ -2484,10 +2494,31 @@ func countResourceQuota(ctx context.Context, c clientset.Interface, namespace st
 // Wait for resource quota status to show the expected used resources value.
 // Other resources are ignored.
 func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
+	var lastResourceQuota *v1.ResourceQuota
+	err := framework.Gomega().Eventually(ctx, framework.GetObject(c.CoreV1().ResourceQuotas(ns).Get, quotaName, metav1.GetOptions{})).Should(haveUsedResources(used, &lastResourceQuota))
+	if lastResourceQuota != nil && err == nil {
+		framework.Logf("Got expected ResourceQuota:\n%s", format.Object(lastResourceQuota, 1))
+	}
+	return err
+}
+
+// Ensure that the resource quota status shows the expected used resources value and does not change.
+// Other resources are ignored.
+func ensureResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
+	// The longer we check the higher the confidence that it really remains the same.
+	// But we don't want to delay too long because it makes the test slower.
+	err := framework.Gomega().Consistently(ctx, framework.GetObject(c.CoreV1().ResourceQuotas(ns).Get, quotaName, metav1.GetOptions{})).WithTimeout(10 * time.Second).Should(haveUsedResources(used, nil))
+	return err
+}
+
+func haveUsedResources(used v1.ResourceList, lastResourceQuota **v1.ResourceQuota) gomegatypes.GomegaMatcher {
 	// The template emits the actual ResourceQuota object as YAML.
 	// In particular the ManagedFields are interesting because both
 	// kube-apiserver and kube-controller-manager set the status.
-	haveUsedResources := gcustom.MakeMatcher(func(resourceQuota *v1.ResourceQuota) (bool, error) {
+	return gcustom.MakeMatcher(func(resourceQuota *v1.ResourceQuota) (bool, error) {
+		if lastResourceQuota != nil {
+			*lastResourceQuota = resourceQuota
+		}
 		// used may not yet be calculated
 		if resourceQuota.Status.Used == nil {
 			return false, nil
@@ -2500,8 +2531,6 @@ func waitForResourceQuota(ctx context.Context, c clientset.Interface, ns, quotaN
 		}
 		return true, nil
 	}).WithTemplate("Expected:\n{{.FormattedActual}}\n{{.To}} have the following .status.used entries:\n{{range $key, $value := .Data}} {{$key}}: \"{{$value.ToUnstructured}}\"\n{{end}}").WithTemplateData(used /* Formatting of the map is done inside the template. */)
-	err := framework.Gomega().Eventually(ctx, framework.GetObject(c.CoreV1().ResourceQuotas(ns).Get, quotaName, metav1.GetOptions{})).Should(haveUsedResources)
-	return err
 }

 // updateResourceQuotaUntilUsageAppears updates the resource quota object until the usage is populated
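The new haveUsedResources helper builds its matcher with gcustom.MakeMatcher, renders failures through a template, and threads a **v1.ResourceQuota out-parameter so waitForResourceQuota can log the last object it polled. A stripped-down sketch of that capture pattern with a made-up widget type (all names here are illustrative, not part of the PR):

package main

import (
	"fmt"

	"github.com/onsi/gomega/gcustom"
	gomegatypes "github.com/onsi/gomega/types"
)

type widget struct {
	Ready bool
}

// beReady matches a *widget that is ready. When last is non-nil the matcher
// also records the most recent actual value, so the caller can log what it
// saw after Eventually or Consistently returns.
func beReady(last **widget) gomegatypes.GomegaMatcher {
	return gcustom.MakeMatcher(func(w *widget) (bool, error) {
		if last != nil {
			*last = w
		}
		return w.Ready, nil
	}).WithTemplate("Expected:\n{{.FormattedActual}}\n{{.To}} be ready")
}

func main() {
	var last *widget
	ok, err := beReady(&last).Match(&widget{Ready: false})
	fmt.Println(ok, err, last) // false <nil> &{false}
}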