Merge pull request #117194 from sanposhiho/revert-preenqueue

Revert "Optimization on running prePreEnqueuePlugins before adding pods into activeQ"
This commit is contained in:
Kubernetes Prow Robot 2023-04-13 16:00:50 -07:00 committed by GitHub
commit 29c8fb678c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 12 additions and 47 deletions

View File

@ -8,6 +8,8 @@
- [Node Binaries](#node-binaries)
- [Container Images](#container-images)
- [Changelog since v1.26.0](#changelog-since-v1260)
- [Known Issues](#known-issues)
- [The PreEnqueue extension point doesn't work for Pods going to activeQ through backoffQ](#the-preenqueue-extension-point-doesnt-work-for-pods-going-to-activeq-through-backoffq)
- [Urgent Upgrade Notes](#urgent-upgrade-notes)
- [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade)
- [Changes by Kind](#changes-by-kind)
@ -198,6 +200,15 @@ name | architectures
## Changelog since v1.26.0
## Known Issues
### The PreEnqueue extension point doesn't work for Pods going to activeQ through backoffQ
In v1.26.0, we found a bug where the PreEnqueue extension point doesn't work for Pods that move to activeQ through backoffQ.
It doesn't affect any of the vanilla Kubernetes behavior, but it may break custom PreEnqueue plugins.
The PR that caused the bug has been [reverted](https://github.com/kubernetes/kubernetes/pull/117194) in v1.26.1.
## Urgent Upgrade Notes
### (No, really, you MUST read this before you upgrade)

View File

@ -553,9 +553,7 @@ func (p *PriorityQueue) flushBackoffQCompleted() {
klog.ErrorS(err, "Unable to pop pod from backoff queue despite backoff completion", "pod", klog.KObj(pod))
break
}
if err := p.activeQ.Add(pInfo); err != nil {
klog.ErrorS(err, "Error adding pod to the active queue", "pod", klog.KObj(pInfo.Pod))
} else {
if added, _ := p.addToActiveQ(pInfo); added {
klog.V(5).InfoS("Pod moved to an internal scheduling queue", "pod", klog.KObj(pod), "event", BackoffComplete, "queue", activeQName)
metrics.SchedulerQueueIncomingPods.WithLabelValues("active", BackoffComplete).Inc()
activated = true

View File

@ -452,8 +452,6 @@ func TestPriorityQueue_Activate(t *testing.T) {
}
// preEnqueuePlugin is a test PreEnqueue plugin that records how many times
// PreEnqueue is invoked and checks pod labels against an allowlist.
type preEnqueuePlugin struct {
// called counts the number of calling PreEnqueue()
called int
// allowlists holds label keys that PreEnqueue compares against the pod's
// labels to decide admission.
allowlists []string
}
@ -462,7 +460,6 @@ func (pl *preEnqueuePlugin) Name() string {
}
func (pl *preEnqueuePlugin) PreEnqueue(ctx context.Context, p *v1.Pod) *framework.Status {
pl.called++
for _, allowed := range pl.allowlists {
for label := range p.Labels {
if label == allowed {
@ -544,47 +541,6 @@ func TestPriorityQueue_addToActiveQ(t *testing.T) {
}
}
// TestPriorityQueue_flushBackoffQCompleted verifies how PreEnqueue plugins are
// (not) invoked for pods that leave the backoff queue once their backoff
// period has elapsed.
func TestPriorityQueue_flushBackoffQCompleted(t *testing.T) {
	for _, tc := range []struct {
		name                       string
		plugin                     framework.PreEnqueuePlugin
		pod                        *v1.Pod
		operations                 []operation
		wantPreEnqueuePluginCalled int
	}{
		{
			name:   "preEnqueue plugin registered, not running preEnqueue plugin when backoff completed",
			plugin: &preEnqueuePlugin{},
			pod:    st.MakePod().Name("foo").Label("foo", "").Obj(),
			operations: []operation{
				addPodBackoffQ,
				flushBackoffQ,
			},
			wantPreEnqueuePluginCalled: 0,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			ctx, cancel := context.WithCancel(context.Background())
			defer cancel()

			// Register the plugin under the default ("") profile.
			pluginMap := map[string][]framework.PreEnqueuePlugin{"": {tc.plugin}}
			fakeClock := testingclock.NewFakeClock(time.Now())
			q := NewTestQueueWithObjects(ctx, newDefaultQueueSort(), []runtime.Object{tc.pod},
				WithPreEnqueuePluginMap(pluginMap),
				WithPodInitialBackoffDuration(time.Second*1),
				WithPodMaxBackoffDuration(time.Second*60),
				WithClock(fakeClock))

			pInfo := newQueuedPodInfoForLookup(tc.pod)
			pInfo.Gated = true
			for _, op := range tc.operations {
				op(q, pInfo)
			}

			got := tc.plugin.(*preEnqueuePlugin).called
			if tc.wantPreEnqueuePluginCalled != got {
				t.Errorf("Unexpected number of calling preEnqueue: want %v, but got %v", tc.wantPreEnqueuePluginCalled, got)
			}
		})
	}
}
func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
tests := []struct {
name string