Merge pull request #104577 from smarterclayton/smaller_filter_master

kubelet: Admission must exclude completed pods and avoid races

Commit bbbeceb6aa
@@ -95,6 +95,10 @@ func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
 // GetActivePods returns pods that may have a running container (a
 // terminated pod is one that is known to have no running containers and
 // will not get any more).
+//
+// TODO: This method must include pods that have been force deleted from
+// the config source (and thus removed from the pod manager) but are still
+// terminating.
 func (kl *Kubelet) GetActivePods() []*v1.Pod {
 	allPods := kl.podManager.GetPods()
 	activePods := kl.filterOutTerminatedPods(allPods)
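GetActivePods feeds the kubelet's admission and resource accounting, which is why completed pods must be excluded: a terminal pod's requests would otherwise keep counting against the node. A minimal sketch of that accounting pressure (hypothetical helpers, not kubelet code; assumes only the standard k8s.io/api and k8s.io/apimachinery modules):

// admission_sketch.go: why admission must see only active pods.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// requestedCPU sums CPU requests across pods. If Succeeded/Failed pods
// were included, a full node would keep rejecting new pods even though
// the completed pods can never run again.
func requestedCPU(activePods []*v1.Pod) *resource.Quantity {
	total := resource.NewMilliQuantity(0, resource.DecimalSI)
	for _, pod := range activePods {
		for _, c := range pod.Spec.Containers {
			if req, ok := c.Resources.Requests[v1.ResourceCPU]; ok {
				total.Add(req)
			}
		}
	}
	return total
}

func podWithCPU(name, cpu string, phase v1.PodPhase) *v1.Pod {
	p := &v1.Pod{}
	p.Name = name
	p.Status.Phase = phase
	p.Spec.Containers = []v1.Container{{
		Name: "c",
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU: resource.MustParse(cpu),
			},
		},
	}}
	return p
}

func main() {
	running := podWithCPU("web", "500m", v1.PodRunning)
	done := podWithCPU("batch", "2", v1.PodSucceeded)

	// Counting the completed pod reports 2500m in use; filtering it
	// out reports the true 500m, the difference between rejecting and
	// admitting the next large pod.
	fmt.Println("all pods:", requestedCPU([]*v1.Pod{running, done}))
	fmt.Println("active only:", requestedCPU([]*v1.Pod{running}))
}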
@@ -964,12 +968,17 @@ func (kl *Kubelet) podResourcesAreReclaimed(pod *v1.Pod) bool {
 	return kl.PodResourcesAreReclaimed(pod, status)
 }
 
-// filterOutTerminatedPods returns the pods that could still have running
-// containers
+// filterOutTerminatedPods returns pods that are not in a terminal phase
+// or are known to be fully terminated. This method should only be used
+// when the set of pods being filtered is upstream of the pod worker, i.e.
+// the pods the pod manager is aware of.
 func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
-	var filteredPods []*v1.Pod
+	filteredPods := make([]*v1.Pod, 0, len(pods))
 	for _, p := range pods {
-		if !kl.podWorkers.CouldHaveRunningContainers(p.UID) {
+		if kl.podWorkers.IsPodKnownTerminated(p.UID) {
+			continue
+		}
+		if p.Status.Phase == v1.PodSucceeded || p.Status.Phase == v1.PodFailed {
 			continue
 		}
 		filteredPods = append(filteredPods, p)
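The new predicate closes a race: a pod can reach the Succeeded or Failed phase before the pod worker observes its termination, so the filter now checks the phase directly in addition to asking the worker. A standalone restatement of that logic against a stub worker (the stub is assumed, not the real podWorkers):

// filter_sketch.go: the new filter logic, shown with a stub worker
// that has not yet observed the succeeded pod's termination.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// stubWorkers stands in for the kubelet's podWorkers; only pods listed
// in terminated are "known terminated".
type stubWorkers struct{ terminated map[types.UID]bool }

func (s stubWorkers) IsPodKnownTerminated(uid types.UID) bool { return s.terminated[uid] }

// filterOutTerminatedPods mirrors the kubelet logic from the hunk above.
func filterOutTerminatedPods(w stubWorkers, pods []*v1.Pod) []*v1.Pod {
	filtered := make([]*v1.Pod, 0, len(pods))
	for _, p := range pods {
		if w.IsPodKnownTerminated(p.UID) {
			continue
		}
		if p.Status.Phase == v1.PodSucceeded || p.Status.Phase == v1.PodFailed {
			continue
		}
		filtered = append(filtered, p)
	}
	return filtered
}

func pod(name string, phase v1.PodPhase) *v1.Pod {
	p := &v1.Pod{}
	p.Name = name
	p.UID = types.UID(name)
	p.Status.Phase = phase
	return p
}

func main() {
	running := pod("a", v1.PodRunning)
	succeeded := pod("b", v1.PodSucceeded) // worker has NOT seen this yet

	w := stubWorkers{terminated: map[types.UID]bool{}}
	for _, p := range filterOutTerminatedPods(w, []*v1.Pod{running, succeeded}) {
		fmt.Println("active:", p.Name) // prints only "active: a"
	}
}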
@@ -130,6 +130,14 @@ type PodWorkers interface {
 	// true.
 	SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorkType
 
+	// IsPodKnownTerminated returns true if the provided pod UID is known by the pod
+	// worker to be terminated. If the pod has been force deleted and the pod worker
+	// has completed termination this method will return false, so this method should
+	// only be used to filter out pods from the desired set such as in admission.
+	//
+	// Intended for use by the kubelet config loops, but not subsystems, which should
+	// use ShouldPod*().
+	IsPodKnownTerminated(uid types.UID) bool
 	// CouldHaveRunningContainers returns true before the pod workers have synced,
 	// once the pod workers see the pod (syncPod could be called), and returns false
 	// after the pod has been terminated (running containers guaranteed stopped).
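The doc comment distinguishes this predicate from CouldHaveRunningContainers: for a pod the worker has never seen, IsPodKnownTerminated must report false (nothing is known), while CouldHaveRunningContainers must report true (syncPod could still be called). A toy model of that asymmetry (assumed states, not the real podSyncStatuses type):

// predicates_sketch.go: how the two predicates diverge for a pod the
// worker has not observed. New pods must look "possibly running" to
// shutdown-style checks but must not look "terminated" to admission.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
)

type syncState int

const (
	stateUnknown    syncState = iota // worker has never seen the pod
	stateSyncing                     // worker may be starting containers
	stateTerminated                  // worker guarantees containers stopped
)

type toyWorkers struct{ states map[types.UID]syncState }

// IsPodKnownTerminated: false unless the worker positively knows the
// pod terminated; unknown pods report false.
func (t toyWorkers) IsPodKnownTerminated(uid types.UID) bool {
	return t.states[uid] == stateTerminated
}

// CouldHaveRunningContainers: true for unknown pods, because syncPod
// could be called for them at any time.
func (t toyWorkers) CouldHaveRunningContainers(uid types.UID) bool {
	s, ok := t.states[uid]
	return !ok || s != stateTerminated
}

func main() {
	w := toyWorkers{states: map[types.UID]syncState{"seen": stateTerminated}}
	for _, uid := range []types.UID{"new", "seen"} {
		fmt.Printf("%s: knownTerminated=%v couldRun=%v\n",
			uid, w.IsPodKnownTerminated(uid), w.CouldHaveRunningContainers(uid))
	}
}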
@@ -394,6 +402,16 @@ func newPodWorkers(
 	}
 }
 
+func (p *podWorkers) IsPodKnownTerminated(uid types.UID) bool {
+	p.podLock.Lock()
+	defer p.podLock.Unlock()
+	if status, ok := p.podSyncStatuses[uid]; ok {
+		return status.IsTerminated()
+	}
+	// if the pod is not known, we return false (pod worker is not aware of it)
+	return false
+}
+
 func (p *podWorkers) CouldHaveRunningContainers(uid types.UID) bool {
 	p.podLock.Lock()
 	defer p.podLock.Unlock()
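The implementation takes podLock, consults podSyncStatuses, and returns a plain bool rather than the status object, so callers never touch worker state outside the lock. The same guarded-map shape in isolation (generic names, not kubelet code):

// guarded_lookup.go: the mutex-guarded map lookup pattern used by
// IsPodKnownTerminated above.
package main

import (
	"fmt"
	"sync"
)

type registry struct {
	mu     sync.Mutex
	status map[string]bool // true = terminated
}

// known returns a copy of the state, never a reference into the map,
// so no caller can observe the map without holding mu.
func (r *registry) known(key string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	if v, ok := r.status[key]; ok {
		return v
	}
	// unknown keys report false, matching IsPodKnownTerminated
	return false
}

func main() {
	r := &registry{status: map[string]bool{"done": true}}
	fmt.Println(r.known("done"), r.known("new")) // true false
}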
@@ -49,6 +49,7 @@ type fakePodWorkers struct {
 	statusLock           sync.Mutex
 	running              map[types.UID]bool
 	terminating          map[types.UID]bool
+	terminated           map[types.UID]bool
 	terminationRequested map[types.UID]bool
 	removeRuntime        map[types.UID]bool
 	removeContent        map[types.UID]bool
@@ -85,6 +86,11 @@ func (f *fakePodWorkers) SyncKnownPods(desiredPods []*v1.Pod) map[types.UID]PodWorkType {
 	return nil
 }
 
+func (f *fakePodWorkers) IsPodKnownTerminated(uid types.UID) bool {
+	f.statusLock.Lock()
+	defer f.statusLock.Unlock()
+	return f.terminated[uid]
+}
 func (f *fakePodWorkers) CouldHaveRunningContainers(uid types.UID) bool {
 	f.statusLock.Lock()
 	defer f.statusLock.Unlock()
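With the terminated map on the fake, tests can mark a pod as known-terminated without spinning up a real pod worker. A hypothetical sketch (not a test from this PR) of driving the predicate from a table of cases:

// fake_workers_sketch.go: exercising a fake with a terminated map, in
// the style the new fakePodWorkers field enables.
package main

import (
	"fmt"
	"sync"

	"k8s.io/apimachinery/pkg/types"
)

type fakeWorkers struct {
	statusLock sync.Mutex
	terminated map[types.UID]bool
}

func (f *fakeWorkers) IsPodKnownTerminated(uid types.UID) bool {
	f.statusLock.Lock()
	defer f.statusLock.Unlock()
	return f.terminated[uid]
}

func main() {
	f := &fakeWorkers{terminated: map[types.UID]bool{"old-pod": true}}

	cases := []struct {
		uid  types.UID
		want bool
	}{
		{"old-pod", true}, // marked terminated in the fake
		{"new-pod", false},
	}
	for _, tc := range cases {
		if got := f.IsPodKnownTerminated(tc.uid); got != tc.want {
			fmt.Printf("FAIL %s: got %v, want %v\n", tc.uid, got, tc.want)
		} else {
			fmt.Printf("ok   %s: %v\n", tc.uid, got)
		}
	}
}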