Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #111932 from azylinski/rm-lastContainerStartedTime-lru
Cleanup: Remove unused lastContainerStartedTime time.Cache lru
Commit 67d75db890
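For context, the deleted timeCache (removed wholesale in the last two hunks below) was a mutex-guarded, LRU-bounded map from pod UID to a timestamp. Note that the diff removes only the Add call site (fired on PLEG ContainerStarted events) together with the field, the type, and its test; no Get or Remove callers are removed anywhere in the diff, consistent with the "unused" in the commit title. A minimal runnable sketch of the pattern follows; it is not the original code and assumes a plain map in place of the github.com/golang/groupcache/lru dependency and string keys in place of types.UID, so it compiles with the standard library alone:

package main

import (
	"fmt"
	"sync"
	"time"
)

// timeCache is a trimmed stand-in for the deleted type; the real one
// capped entries with an LRU (maxTimeCacheEntries = 1000).
type timeCache struct {
	lock  sync.Mutex
	cache map[string]time.Time
}

func newTimeCache() *timeCache {
	return &timeCache{cache: map[string]time.Time{}}
}

// Add records t under uid, overwriting any previous entry.
func (c *timeCache) Add(uid string, t time.Time) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache[uid] = t
}

// Get returns the recorded time for uid, if any.
func (c *timeCache) Get(uid string) (time.Time, bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	t, ok := c.cache[uid]
	return t, ok
}

func main() {
	c := newTimeCache()
	c.Add("pod-uid-1", time.Now()) // the only kind of call the kubelet still made
	if t, ok := c.Get("pod-uid-1"); ok { // this read side had no remaining callers
		fmt.Println("last container start:", t)
	}
}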
@@ -528,7 +528,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 		experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate),
 		keepTerminatedPodVolumes:                keepTerminatedPodVolumes,
 		nodeStatusMaxImages:                     nodeStatusMaxImages,
-		lastContainerStartedTime:                newTimeCache(),
 	}
 
 	if klet.cloud != nil {
@@ -1027,9 +1026,6 @@ type Kubelet struct {
 	// lastStatusReportTime is the time when node status was last reported.
 	lastStatusReportTime time.Time
 
-	// lastContainerStartedTime is the time of the last ContainerStarted event observed per pod
-	lastContainerStartedTime *timeCache
-
 	// syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe.
 	// This lock is used by Kubelet.syncNodeStatus function and shouldn't be used anywhere else.
 	syncNodeStatusMux sync.Mutex
@@ -2122,12 +2118,6 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
 		kl.sourcesReady.AddSource(u.Source)
 
 	case e := <-plegCh:
-		if e.Type == pleg.ContainerStarted {
-			// record the most recent time we observed a container start for this pod.
-			// this lets us selectively invalidate the runtimeCache when processing a delete for this pod
-			// to make sure we don't miss handling graceful termination for containers we reported as having started.
-			kl.lastContainerStartedTime.Add(e.ID, time.Now())
-		}
 		if isSyncPodWorthy(e) {
 			// PLEG event for a pod; sync it.
 			if pod, ok := kl.podManager.GetPodByUID(e.ID); ok {
@@ -388,7 +388,6 @@ func newTestKubeletWithImageList(
 
 	kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
 	kubelet.AddPodSyncHandler(activeDeadlineHandler)
-	kubelet.lastContainerStartedTime = newTimeCache()
 	kubelet.kubeletConfiguration.LocalStorageCapacityIsolation = localStorageCapacityIsolation
 	return &TestKubelet{kubelet, fakeRuntime, fakeContainerManager, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
 }
@@ -1,67 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubelet
-
-import (
-	"sync"
-	"time"
-
-	"github.com/golang/groupcache/lru"
-
-	"k8s.io/apimachinery/pkg/types"
-)
-
-// timeCache stores a time keyed by uid
-type timeCache struct {
-	lock  sync.Mutex
-	cache *lru.Cache
-}
-
-// maxTimeCacheEntries is the cache entry number in lru cache. 1000 is a proper number
-// for our 100 pods per node target. If we support more pods per node in the future, we
-// may want to increase the number.
-const maxTimeCacheEntries = 1000
-
-func newTimeCache() *timeCache {
-	return &timeCache{cache: lru.New(maxTimeCacheEntries)}
-}
-
-func (c *timeCache) Add(uid types.UID, t time.Time) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	c.cache.Add(uid, t)
-}
-
-func (c *timeCache) Remove(uid types.UID) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	c.cache.Remove(uid)
-}
-
-func (c *timeCache) Get(uid types.UID) (time.Time, bool) {
-	c.lock.Lock()
-	defer c.lock.Unlock()
-	value, ok := c.cache.Get(uid)
-	if !ok {
-		return time.Time{}, false
-	}
-	t, ok := value.(time.Time)
-	if !ok {
-		return time.Time{}, false
-	}
-	return t, true
-}
@@ -1,55 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubelet
-
-import (
-	"testing"
-	"time"
-
-	"github.com/golang/groupcache/lru"
-)
-
-func TestTimeCache(t *testing.T) {
-	cache := &timeCache{cache: lru.New(2)}
-	if a, ok := cache.Get("123"); ok {
-		t.Errorf("expected cache miss, got %v, %v", a, ok)
-	}
-
-	now := time.Now()
-	soon := now.Add(time.Minute)
-	cache.Add("now", now)
-	cache.Add("soon", soon)
-
-	if a, ok := cache.Get("now"); !ok || !a.Equal(now) {
-		t.Errorf("expected cache hit matching %v, got %v, %v", now, a, ok)
-	}
-	if a, ok := cache.Get("soon"); !ok || !a.Equal(soon) {
-		t.Errorf("expected cache hit matching %v, got %v, %v", soon, a, ok)
-	}
-
-	then := now.Add(-time.Minute)
-	cache.Add("then", then)
-	if a, ok := cache.Get("now"); ok {
-		t.Errorf("expected cache miss from oldest evicted value, got %v, %v", a, ok)
-	}
-	if a, ok := cache.Get("soon"); !ok || !a.Equal(soon) {
-		t.Errorf("expected cache hit matching %v, got %v, %v", soon, a, ok)
-	}
-	if a, ok := cache.Get("then"); !ok || !a.Equal(then) {
-		t.Errorf("expected cache hit matching %v, got %v, %v", then, a, ok)
-	}
-}