Merge pull request #111932 from azylinski/rm-lastContainerStartedTime-lru

Cleanup: Remove unused lastContainerStartedTime time.Cache lru
Kubernetes Prow Robot 2022-08-29 09:54:37 -07:00 committed by GitHub
commit 67d75db890
4 changed files with 0 additions and 133 deletions


@@ -528,7 +528,6 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
experimentalHostUserNamespaceDefaulting: utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalHostUserNamespaceDefaultingGate),
keepTerminatedPodVolumes: keepTerminatedPodVolumes,
nodeStatusMaxImages: nodeStatusMaxImages,
lastContainerStartedTime: newTimeCache(),
}
if klet.cloud != nil {
@@ -1027,9 +1026,6 @@ type Kubelet struct {
// lastStatusReportTime is the time when node status was last reported.
lastStatusReportTime time.Time
// lastContainerStartedTime is the time of the last ContainerStarted event observed per pod
lastContainerStartedTime *timeCache
// syncNodeStatusMux is a lock on updating the node status, because this path is not thread-safe.
// This lock is used by Kubelet.syncNodeStatus function and shouldn't be used anywhere else.
syncNodeStatusMux sync.Mutex
@@ -2122,12 +2118,6 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
kl.sourcesReady.AddSource(u.Source)
case e := <-plegCh:
if e.Type == pleg.ContainerStarted {
// record the most recent time we observed a container start for this pod.
// this lets us selectively invalidate the runtimeCache when processing a delete for this pod
// to make sure we don't miss handling graceful termination for containers we reported as having started.
kl.lastContainerStartedTime.Add(e.ID, time.Now())
}
if isSyncPodWorthy(e) {
// PLEG event for a pod; sync it.
if pod, ok := kl.podManager.GetPodByUID(e.ID); ok {


@@ -388,7 +388,6 @@ func newTestKubeletWithImageList(
kubelet.AddPodSyncLoopHandler(activeDeadlineHandler)
kubelet.AddPodSyncHandler(activeDeadlineHandler)
kubelet.lastContainerStartedTime = newTimeCache()
kubelet.kubeletConfiguration.LocalStorageCapacityIsolation = localStorageCapacityIsolation
return &TestKubelet{kubelet, fakeRuntime, fakeContainerManager, fakeKubeClient, fakeMirrorClient, fakeClock, nil, plug}
}


@@ -1,67 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"sync"
	"time"

	"github.com/golang/groupcache/lru"

	"k8s.io/apimachinery/pkg/types"
)

// timeCache stores a time keyed by uid
type timeCache struct {
	lock  sync.Mutex
	cache *lru.Cache
}

// maxTimeCacheEntries is the cache entry number in lru cache. 1000 is a proper number
// for our 100 pods per node target. If we support more pods per node in the future, we
// may want to increase the number.
const maxTimeCacheEntries = 1000

func newTimeCache() *timeCache {
	return &timeCache{cache: lru.New(maxTimeCacheEntries)}
}

func (c *timeCache) Add(uid types.UID, t time.Time) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache.Add(uid, t)
}

func (c *timeCache) Remove(uid types.UID) {
	c.lock.Lock()
	defer c.lock.Unlock()
	c.cache.Remove(uid)
}

func (c *timeCache) Get(uid types.UID) (time.Time, bool) {
	c.lock.Lock()
	defer c.lock.Unlock()
	value, ok := c.cache.Get(uid)
	if !ok {
		return time.Time{}, false
	}
	t, ok := value.(time.Time)
	if !ok {
		return time.Time{}, false
	}
	return t, true
}
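For reference (not part of the diff), a minimal sketch of how the removed timeCache was exercised, using only the Add/Get/Remove methods shown above; the pod UID is a made-up example:

	// Illustrative snippet only: "pod-1234" is a hypothetical UID.
	cache := newTimeCache()
	cache.Add(types.UID("pod-1234"), time.Now())

	if started, ok := cache.Get(types.UID("pod-1234")); ok {
		// started holds the last recorded container start time for this pod.
		_ = started
	}

	cache.Remove(types.UID("pod-1234"))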


@@ -1,55 +0,0 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package kubelet

import (
	"testing"
	"time"

	"github.com/golang/groupcache/lru"
)

func TestTimeCache(t *testing.T) {
	cache := &timeCache{cache: lru.New(2)}
	if a, ok := cache.Get("123"); ok {
		t.Errorf("expected cache miss, got %v, %v", a, ok)
	}

	now := time.Now()
	soon := now.Add(time.Minute)
	cache.Add("now", now)
	cache.Add("soon", soon)
	if a, ok := cache.Get("now"); !ok || !a.Equal(now) {
		t.Errorf("expected cache hit matching %v, got %v, %v", now, a, ok)
	}
	if a, ok := cache.Get("soon"); !ok || !a.Equal(soon) {
		t.Errorf("expected cache hit matching %v, got %v, %v", soon, a, ok)
	}

	then := now.Add(-time.Minute)
	cache.Add("then", then)
	if a, ok := cache.Get("now"); ok {
		t.Errorf("expected cache miss from oldest evicted value, got %v, %v", a, ok)
	}
	if a, ok := cache.Get("soon"); !ok || !a.Equal(soon) {
		t.Errorf("expected cache hit matching %v, got %v, %v", soon, a, ok)
	}
	if a, ok := cache.Get("then"); !ok || !a.Equal(then) {
		t.Errorf("expected cache hit matching %v, got %v, %v", then, a, ok)
	}
}
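A note on why the deleted wrapper held a sync.Mutex: the underlying github.com/golang/groupcache/lru Cache is not safe for concurrent access, and it evicts the least recently used entry once it grows past its capacity, which is the behavior the test above checks. A small standalone sketch of that eviction behavior (not part of the diff; keys and values are arbitrary examples):

	package main

	import (
		"fmt"

		"github.com/golang/groupcache/lru"
	)

	func main() {
		c := lru.New(2) // capacity of two entries, as in the test above

		c.Add("now", 1)
		c.Add("soon", 2)
		_, _ = c.Get("now") // touching "now" makes "soon" the least recently used entry

		c.Add("then", 3) // over capacity: "soon" is evicted

		if _, ok := c.Get("soon"); !ok {
			fmt.Println(`"soon" was evicted`)
		}
	}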