Merge pull request #91126 from Huang-Wei/pr/85910
Fix scheduler cache inconsistency upon Pods with same name but different UIDs
commit 938875582b
@@ -243,6 +243,15 @@ func (sched *Scheduler) updatePodInCache(oldObj, newObj interface{}) {
 		return
 	}
 
+	// A Pod delete event followed by an immediate Pod add event may be merged
+	// into a Pod update event. In this case, we should invalidate the old Pod, and
+	// then add the new Pod.
+	if oldPod.UID != newPod.UID {
+		sched.deletePodFromCache(oldObj)
+		sched.addPodToCache(newObj)
+		return
+	}
+
 	// NOTE: Updates must be written to scheduler cache before invalidating
 	// equivalence cache, because we could snapshot equivalence cache after the
 	// invalidation and then snapshot the cache itself. If the cache is
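Why the UID check matters: the scheduler cache keys each Pod by its UID, so an update event whose old and new objects carry different UIDs is really a delete of one cache entry plus an add of another; writing the new object over the old key would leak the stale entry. A minimal, self-contained sketch of the idea in plain Go (the map-backed cache and the trimmed-down pod type are stand-ins for the real scheduler types, not the PR's code):

package main

import "fmt"

// pod is a stand-in for *v1.Pod carrying only the fields the check needs.
type pod struct {
	name string
	uid  string
}

// updatePodInCache mirrors the patched logic: when an update event carries a
// new UID, treat it as a delete of the old entry plus an add of the new one,
// because the cache is keyed by UID.
func updatePodInCache(cache map[string]pod, oldPod, newPod pod) {
	if oldPod.uid != newPod.uid {
		delete(cache, oldPod.uid) // invalidate the stale entry first
		cache[newPod.uid] = newPod
		return
	}
	cache[newPod.uid] = newPod // same UID: plain in-place update
}

func main() {
	cache := map[string]pod{}
	old := pod{name: "pod", uid: "oldUID"}
	cache[old.uid] = old

	updatePodInCache(cache, old, pod{name: "pod", uid: "newUID"})
	fmt.Println(len(cache)) // 1 — without the UID check the stale "oldUID" entry would linger, giving 2
}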
@@ -19,11 +19,14 @@ package scheduler
 import (
 	"reflect"
 	"testing"
+	"time"
 
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
+	"k8s.io/kubernetes/pkg/scheduler/internal/queue"
 )
 
 func TestSkipPodUpdate(t *testing.T) {
@@ -346,3 +349,51 @@ func TestNodeConditionsChanged(t *testing.T) {
 		})
 	}
 }
+
+func TestUpdatePodInCache(t *testing.T) {
+	ttl := 10 * time.Second
+	nodeName := "node"
+
+	tests := []struct {
+		name   string
+		oldObj interface{}
+		newObj interface{}
+	}{
+		{
+			name:   "pod updated with the same UID",
+			oldObj: withPodName(podWithPort("oldUID", nodeName, 80), "pod"),
+			newObj: withPodName(podWithPort("oldUID", nodeName, 8080), "pod"),
+		},
+		{
+			name:   "pod updated with different UIDs",
+			oldObj: withPodName(podWithPort("oldUID", nodeName, 80), "pod"),
+			newObj: withPodName(podWithPort("newUID", nodeName, 8080), "pod"),
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			stopCh := make(chan struct{})
+			defer close(stopCh)
+			schedulerCache := cache.New(ttl, stopCh)
+			schedulerQueue := queue.NewPriorityQueue(nil)
+			sched := &Scheduler{
+				SchedulerCache:  schedulerCache,
+				SchedulingQueue: schedulerQueue,
+			}
+			sched.addPodToCache(tt.oldObj)
+			sched.updatePodInCache(tt.oldObj, tt.newObj)
+			pod, err := sched.SchedulerCache.GetPod(tt.newObj.(*v1.Pod))
+			if err != nil {
+				t.Errorf("Failed to get pod from scheduler: %v", err)
+			}
+			if pod.UID != tt.newObj.(*v1.Pod).UID {
+				t.Errorf("Want pod UID %v, got %v", tt.newObj.(*v1.Pod).UID, pod.UID)
+			}
+		})
+	}
+}
+
+func withPodName(pod *v1.Pod, name string) *v1.Pod {
+	pod.Name = name
+	return pod
+}
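The second test case drives the new delete-then-add path, and the assertion on pod.UID confirms the cache now serves the replacement Pod. A hypothetical extra assertion (not part of this PR) that could slot into the t.Run closure would also prove the stale entry was evicted rather than overwritten in place:

// Hypothetical follow-up check, inside t.Run, after updatePodInCache:
oldPod, newPod := tt.oldObj.(*v1.Pod), tt.newObj.(*v1.Pod)
if oldPod.UID != newPod.UID {
	// After the UID swap, the cache entry keyed by the old UID must be gone.
	if _, err := sched.SchedulerCache.GetPod(oldPod); err == nil {
		t.Errorf("pod with stale UID %v was not removed from the cache", oldPod.UID)
	}
}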
@@ -92,9 +92,8 @@ func (fp fakePodPreemptor) removeNominatedNodeName(pod *v1.Pod) error {
 func podWithID(id, desiredHost string) *v1.Pod {
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:     id,
-			UID:      types.UID(id),
-			SelfLink: fmt.Sprintf("/api/v1/%s/%s", string(v1.ResourcePods), id),
+			Name: id,
+			UID:  types.UID(id),
 		},
 		Spec: v1.PodSpec{
 			NodeName: desiredHost,
@@ -110,7 +109,6 @@ func deletingPod(id string) *v1.Pod {
 			Name:              id,
 			UID:               types.UID(id),
 			DeletionTimestamp: &deletionTimestamp,
-			SelfLink:          fmt.Sprintf("/api/v1/%s/%s", string(v1.ResourcePods), id),
 		},
 		Spec: v1.PodSpec{
 			NodeName: "",
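The new TestUpdatePodInCache builds its fixtures with podWithPort, a helper that is referenced but not shown in this diff. A plausible reconstruction, assuming it layers a single container host port onto podWithID from this file (treat the body as an assumption, not the PR's code):

func podWithPort(id, desiredHost string, port int) *v1.Pod {
	pod := podWithID(id, desiredHost)
	pod.Spec.Containers = []v1.Container{
		{Name: "ctr", Ports: []v1.ContainerPort{{HostPort: int32(port)}}},
	}
	return pod
}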