Mirror of https://github.com/k3s-io/kubernetes.git
Add logic to only call Update() if state different than last Update()
Signed-off-by: Kevin Klues <kklues@nvidia.com>
Commit 6646039481 (parent a5cf298a95)
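The change caches the cpuset that was last pushed to the container runtime in a new lastUpdateState field and has reconcileState() skip UpdateContainerResources() whenever the desired cpuset has not changed since the previous update. Below is a minimal, self-contained sketch of that pattern; reconciler, fakeRuntime, and cpuSet are illustrative stand-ins, not the kubelet's actual types.

package main

import "fmt"

// cpuSet stands in for the kubelet's cpuset.CPUSet; a plain string keeps the
// sketch self-contained.
type cpuSet string

// fakeRuntime stands in for the CRI runtime service; Update plays the role of
// an expensive UpdateContainerResources() call.
type fakeRuntime struct{ calls int }

func (r *fakeRuntime) Update(containerID string, cset cpuSet) error {
	r.calls++
	fmt.Printf("updating %s -> %s\n", containerID, cset)
	return nil
}

// reconciler remembers the cpuset pushed on the last successful update, so a
// reconcile pass with unchanged state becomes a no-op.
type reconciler struct {
	runtime    *fakeRuntime
	lastUpdate map[string]cpuSet // containerID -> cpuset from the last Update()
}

func (m *reconciler) reconcile(containerID string, desired cpuSet) error {
	if last, ok := m.lastUpdate[containerID]; ok && last == desired {
		return nil // state identical to the last Update(); skip the runtime call
	}
	if err := m.runtime.Update(containerID, desired); err != nil {
		return err // cache left untouched, so the change is retried on the next pass
	}
	m.lastUpdate[containerID] = desired
	return nil
}

func main() {
	m := &reconciler{runtime: &fakeRuntime{}, lastUpdate: map[string]cpuSet{}}
	for i := 0; i < 3; i++ {
		_ = m.reconcile("ctr-1", "0-3") // only the first pass reaches the runtime
	}
	fmt.Println("runtime calls:", m.runtime.calls) // prints: runtime calls: 1
}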
--- a/pkg/kubelet/cm/cpumanager/cpu_manager.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager.go
@@ -101,6 +101,9 @@ type manager struct {
 	// representation of state for the system to inspect and reconcile.
 	state state.State
 
+	// lastUpdateState holds state for each container from the last time it was updated.
+	lastUpdateState state.State
+
 	// containerRuntime is the container runtime service interface needed
 	// to make UpdateContainerResources() calls against the containers.
 	containerRuntime runtimeService
@@ -187,6 +190,7 @@ func NewManager(cpuPolicyName string, reconcilePeriod time.Duration, machineInfo
 	manager := &manager{
 		policy:                     policy,
 		reconcilePeriod:            reconcilePeriod,
+		lastUpdateState:            state.NewMemoryState(),
 		topology:                   topo,
 		nodeAllocatableReservation: nodeAllocatableReservation,
 		stateFileDirectory:         stateFileDirectory,
@@ -248,6 +252,9 @@ func (m *manager) Allocate(p *v1.Pod, c *v1.Container) error {
 func (m *manager) AddContainer(pod *v1.Pod, container *v1.Container, containerID string) {
 	m.Lock()
 	defer m.Unlock()
+	if cset, exists := m.state.GetCPUSet(string(pod.UID), container.Name); exists {
+		m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset)
+	}
 	m.containerMap.Add(string(pod.UID), container.Name, containerID)
 }
 
@@ -272,6 +279,7 @@ func (m *manager) policyRemoveContainerByID(containerID string) error {
 
 	err = m.policy.RemoveContainer(m.state, podUID, containerName)
 	if err == nil {
+		m.lastUpdateState.Delete(podUID, containerName)
 		m.containerMap.RemoveByContainerID(containerID)
 	}
 
@@ -281,6 +289,7 @@ func (m *manager) policyRemoveContainerByID(containerID string) error {
 func (m *manager) policyRemoveContainerByRef(podUID string, containerName string) error {
 	err := m.policy.RemoveContainer(m.state, podUID, containerName)
 	if err == nil {
+		m.lastUpdateState.Delete(podUID, containerName)
 		m.containerMap.RemoveByContainerRef(podUID, containerName)
 	}
 
@@ -424,6 +433,8 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
 				continue
 			}
 
+			lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name)
+			if !cset.Equals(lcset) {
 			klog.V(4).InfoS("ReconcileState: updating container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
 			err = m.updateContainerCPUSet(containerID, cset)
 			if err != nil {
@@ -431,6 +442,8 @@ func (m *manager) reconcileState() (success []reconciledContainer, failure []rec
 				failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
 				continue
 			}
+			m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset)
+			}
 			success = append(success, reconciledContainer{pod.Name, container.Name, containerID})
 		}
 	}
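Because the reconcileState() change is split across the two hunks above, the updated section reads roughly as follows once both are applied; the error-logging line that falls between the hunks is elided and the indentation is approximated.

lcset := m.lastUpdateState.GetCPUSetOrDefault(string(pod.UID), container.Name)
if !cset.Equals(lcset) {
	klog.V(4).InfoS("ReconcileState: updating container", "pod", klog.KObj(pod), "containerName", container.Name, "containerID", containerID, "cpuSet", cset)
	err = m.updateContainerCPUSet(containerID, cset)
	if err != nil {
		// (error logging elided; it sits between the two hunks above)
		failure = append(failure, reconciledContainer{pod.Name, container.Name, containerID})
		continue
	}
	m.lastUpdateState.SetCPUSet(string(pod.UID), container.Name, cset)
}
success = append(success, reconciledContainer{pod.Name, container.Name, containerID})

On a reconcile pass where nothing changed, the cpuset read from m.state equals the one cached in m.lastUpdateState, so the runtime call is skipped and the container is still reported as reconciled successfully.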
--- a/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
+++ b/pkg/kubelet/cm/cpumanager/cpu_manager_test.go
@@ -265,6 +265,7 @@ func TestCPUManagerAdd(t *testing.T) {
 			assignments:   state.ContainerCPUAssignments{},
 			defaultCPUSet: cpuset.NewCPUSet(1, 2, 3, 4),
 		},
+		lastUpdateState: state.NewMemoryState(),
 		containerRuntime: mockRuntimeService{
 			err: testCase.updateErr,
 		},
@@ -488,6 +489,7 @@ func TestCPUManagerAddWithInitContainers(t *testing.T) {
 		mgr := &manager{
 			policy:            policy,
 			state:             mockState,
+			lastUpdateState:   state.NewMemoryState(),
 			containerRuntime:  mockRuntimeService{},
 			containerMap:      containermap.NewContainerMap(),
 			podStatusProvider: mockPodStatusProvider{},
@@ -670,6 +672,7 @@ func TestCPUManagerRemove(t *testing.T) {
 			assignments:   state.ContainerCPUAssignments{},
 			defaultCPUSet: cpuset.NewCPUSet(),
 		},
+		lastUpdateState:  state.NewMemoryState(),
 		containerRuntime: mockRuntimeService{},
 		containerMap:     containerMap,
 		activePods:       func() []*v1.Pod { return nil },
@@ -936,6 +939,7 @@ func TestReconcileState(t *testing.T) {
 			assignments:   testCase.stAssignments,
 			defaultCPUSet: testCase.stDefaultCPUSet,
 		},
+		lastUpdateState: state.NewMemoryState(),
 		containerRuntime: mockRuntimeService{
 			err: testCase.updateErr,
 		},
@@ -1026,6 +1030,7 @@ func TestCPUManagerAddWithResvList(t *testing.T) {
 			assignments:   state.ContainerCPUAssignments{},
 			defaultCPUSet: cpuset.NewCPUSet(0, 1, 2, 3),
 		},
+		lastUpdateState: state.NewMemoryState(),
 		containerRuntime: mockRuntimeService{
 			err: testCase.updateErr,
 		},