cpu manager: do not clean admitted pods from the state
Signed-off-by: Artyom Lukianov <alukiano@redhat.com>
parent e2e3c2d01c
commit 66babd1a90
pkg/kubelet/cm/cpumanager/cpu_manager.go

@@ -133,6 +133,9 @@ type manager struct {
 	// allocatableCPUs is the set of online CPUs as reported by the system
 	allocatableCPUs cpuset.CPUSet
+
+	// pendingAdmissionPod contains the pod during the admission phase
+	pendingAdmissionPod *v1.Pod
 }

 var _ Manager = &manager{}
@@ -236,6 +239,10 @@ func (m *manager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesRe
 }

 func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error {
+	// The pod is in the admission phase. We need to save it so that it is
+	// not cleaned from the state before admission has ended.
+	m.setPodPendingAdmission(p)
+
 	// Garbage collect any stranded resources before allocating CPUs.
 	m.removeStaleState()

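For context on the ordering this change relies on: Allocate() (like the topology-hint methods in the next hunks) runs while the pod is still being admitted, before activePods() reports it. Below is a minimal sketch of that sequence, with a hypothetical helper name and a narrowed interface; it is not part of this commit.

package cpumanagersketch

import (
	v1 "k8s.io/api/core/v1"
)

// allocator narrows the cpumanager Manager interface to the one method this
// sketch needs.
type allocator interface {
	Allocate(p *v1.Pod, c *v1.Container) error
}

// admitPod is a hypothetical admission sequence. Each Allocate call runs
// removeStaleState() internally; without pendingAdmissionPod, the CPU set
// written for container i-1 would look stale (the pod is not active yet)
// and be garbage collected while container i is being admitted.
func admitPod(m allocator, pod *v1.Pod) error {
	for i := range pod.Spec.Containers {
		if err := m.Allocate(pod, &pod.Spec.Containers[i]); err != nil {
			return err // admission fails; the pod never becomes active
		}
	}
	// Only after admission succeeds does the pod show up in activePods().
	return nil
}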
@@ -304,6 +311,9 @@ func (m *manager) State() state.Reader {
 }

 func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[string][]topologymanager.TopologyHint {
+	// The pod is in the admission phase. We need to save it so that it is
+	// not cleaned from the state before admission has ended.
+	m.setPodPendingAdmission(pod)
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -311,6 +321,9 @@ func (m *manager) GetTopologyHints(pod *v1.Pod, container *v1.Container) map[str
 }

 func (m *manager) GetPodTopologyHints(pod *v1.Pod) map[string][]topologymanager.TopologyHint {
+	// The pod is in the admission phase. We need to save it so that it is
+	// not cleaned from the state before admission has ended.
+	m.setPodPendingAdmission(pod)
 	// Garbage collect any stranded resources before providing TopologyHints
 	m.removeStaleState()
 	// Delegate to active policy
@@ -343,11 +356,14 @@ func (m *manager) removeStaleState() {
 	defer m.Unlock()

 	// Get the list of active pods.
-	activePods := m.activePods()
+	activeAndAdmittedPods := m.activePods()
+	if m.pendingAdmissionPod != nil {
+		activeAndAdmittedPods = append(activeAndAdmittedPods, m.pendingAdmissionPod)
+	}

 	// Build a list of (podUID, containerName) pairs for all containers in all active Pods.
 	activeContainers := make(map[string]map[string]struct{})
-	for _, pod := range activePods {
+	for _, pod := range activeAndAdmittedPods {
 		activeContainers[string(pod.UID)] = make(map[string]struct{})
 		for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
 			activeContainers[string(pod.UID)][container.Name] = struct{}{}
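To see the garbage-collection decision above in isolation, here is a standalone rendering of the logic as a pure function. The name staleEntries and the plain-map signature are hypothetical; the real method walks the checkpointed state and releases each stale assignment through the active policy.

package cpumanagersketch

import (
	v1 "k8s.io/api/core/v1"
)

// staleEntries returns the (podUID, containerName) pairs whose CPU
// assignments should be released: everything not owned by an active pod or
// by the pod currently pending admission.
func staleEntries(
	assignments map[string]map[string]struct{}, // podUID -> container names with state
	activePods []*v1.Pod,
	pendingAdmission *v1.Pod,
) [][2]string {
	// The key change of this commit: the pod being admitted counts as live.
	liveSet := activePods
	if pendingAdmission != nil {
		liveSet = append(liveSet, pendingAdmission)
	}

	live := make(map[string]map[string]struct{})
	for _, pod := range liveSet {
		live[string(pod.UID)] = make(map[string]struct{})
		for _, c := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
			live[string(pod.UID)][c.Name] = struct{}{}
		}
	}

	var stale [][2]string
	for podUID, containers := range assignments {
		for name := range containers {
			if _, ok := live[podUID][name]; !ok {
				stale = append(stale, [2]string{podUID, name})
			}
		}
	}
	return stale
}

With activePods empty and a pod pending admission, that pod's freshly written assignment is no longer reported as stale, which is exactly the scenario the tests below construct by returning nil from activePods().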
@@ -493,3 +509,10 @@ func (m *manager) updateContainerCPUSet(containerID string, cpus cpuset.CPUSet)
 func (m *manager) GetCPUs(podUID, containerName string) cpuset.CPUSet {
 	return m.state.GetCPUSetOrDefault(podUID, containerName)
 }
+
+func (m *manager) setPodPendingAdmission(pod *v1.Pod) {
+	m.Lock()
+	defer m.Unlock()
+
+	m.pendingAdmissionPod = pod
+}
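One design note on the setter above: it takes the same manager mutex that removeStaleState() holds, so the GC path and the admission path never race on the pointer, and each new admission simply overwrites the previous one. The pattern in isolation looks roughly like the sketch below (illustrative only; the real manager embeds sync.Mutex directly):

package cpumanagersketch

import (
	"sync"

	v1 "k8s.io/api/core/v1"
)

// admissionTracker isolates the locking pattern: the writers (Allocate,
// GetTopologyHints, GetPodTopologyHints) and the reader (removeStaleState)
// serialize on a single mutex. The field is overwritten on every admission,
// so only the most recently admitted pod is protected, which is enough
// assuming admissions are serialized, as they are in the kubelet sync loop.
type admissionTracker struct {
	sync.Mutex
	pendingAdmissionPod *v1.Pod
}

func (a *admissionTracker) set(pod *v1.Pod) {
	a.Lock()
	defer a.Unlock()
	a.pendingAdmissionPod = pod
}

func (a *admissionTracker) pending() *v1.Pod {
	a.Lock()
	defer a.Unlock()
	return a.pendingAdmissionPod
}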
pkg/kubelet/cm/cpumanager/cpu_manager_test.go

@@ -277,7 +277,7 @@ func TestCPUManagerAdd(t *testing.T) {

 		pod := makePod("fakePod", "fakeContainer", "2", "2")
 		container := &pod.Spec.Containers[0]
-		mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} }
+		mgr.activePods = func() []*v1.Pod { return nil }

 		err := mgr.Allocate(pod, container)
 		if !reflect.DeepEqual(err, testCase.expAllocateErr) {
@@ -1043,7 +1043,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {

 		pod := makePod("fakePod", "fakeContainer", "2", "2")
 		container := &pod.Spec.Containers[0]
-		mgr.activePods = func() []*v1.Pod { return []*v1.Pod{pod} }
+		mgr.activePods = func() []*v1.Pod { return nil }

 		err := mgr.Allocate(pod, container)
 		if !reflect.DeepEqual(err, testCase.expAllocateErr) {
pkg/kubelet/cm/cpumanager/topology_hints_test.go

@@ -188,6 +188,11 @@ func TestGetTopologyHints(t *testing.T) {
 		if len(tc.expectedHints) == 0 && len(hints) == 0 {
 			continue
 		}
+
+		if m.pendingAdmissionPod == nil {
+			t.Errorf("The pendingAdmissionPod should point to the current pod after the call to GetTopologyHints()")
+		}
+
 		sort.SliceStable(hints, func(i, j int) bool {
 			return hints[i].LessThan(hints[j])
 		})
@@ -236,6 +241,7 @@ func TestGetPodTopologyHints(t *testing.T) {
 		if len(tc.expectedHints) == 0 && len(podHints) == 0 {
 			continue
 		}
+
 		sort.SliceStable(podHints, func(i, j int) bool {
 			return podHints[i].LessThan(podHints[j])
 		})