Remove old reason cache

Random-Liu
2016-01-27 00:08:49 -08:00
committed by Lantao Liu
parent 2b7d0182ca
commit 45e3a1f596


@@ -33,7 +33,6 @@ import (
"github.com/coreos/go-semver/semver"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
"github.com/golang/groupcache/lru"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
@@ -62,8 +61,6 @@ const (
	MinimumDockerAPIVersion = "1.18"
	maxReasonCacheEntries = 200
	// ndots specifies the minimum number of dots that a domain name must contain for the resolver to consider it as FQDN (fully-qualified).
	// We want SRV lookup names like _dns._udp.kube-dns.default.svc to be considered relative;
	// hence, ndots is set to 5.
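Aside (not part of this change): the ndots rule means the resolver only tries a name as an absolute (fully-qualified) query first when it contains at least that many dots; shorter names go through the pod's search domains first. A minimal Go sketch of that threshold check; the helper name is invented for illustration.

package main

import (
	"fmt"
	"strings"
)

// triedAsAbsoluteFirst mirrors the resolver's ndots rule: only names with at
// least ndots dots are attempted as fully-qualified before the search list.
// (Illustrative helper, not code from this repository.)
func triedAsAbsoluteFirst(name string, ndots int) bool {
	return strings.Count(name, ".") >= ndots
}

func main() {
	// "_dns._udp.kube-dns.default.svc" has only 4 dots, so with ndots:5 it is
	// treated as relative and resolved through the search domains first.
	fmt.Println(triedAsAbsoluteFirst("_dns._udp.kube-dns.default.svc", 5)) // false
}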
@@ -98,15 +95,7 @@ type DockerManager struct {
	// The image name of the pod infra container.
	podInfraContainerImage string
	// reasonCache stores the failure reason of the last container creation
	// and/or start in a string, keyed by <pod_UID>_<container_name>. The goal
	// is to propagate this reason to the container status. This endeavor is
	// "best-effort" for two reasons:
	//   1. The cache is not persisted.
	//   2. We use an LRU cache to avoid extra garbage collection work. This
	//      means that some entries may be recycled before a pod has been
	//      deleted.
	reasonCache reasonInfoCache
	// TODO(yifan): Record the pull failure so we can eliminate the image checking?
	// Lower level docker image puller.
	dockerPuller DockerPuller
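Aside (not part of this change): the "best-effort" caveat above comes from the eviction behavior of github.com/golang/groupcache/lru, the package imported by this file. Once the cache exceeds its capacity, the least recently used entry is dropped even if the pod it belongs to still exists. A tiny standalone sketch, with keys and a toy capacity made up for illustration:

package main

import (
	"fmt"

	"github.com/golang/groupcache/lru"
)

func main() {
	c := lru.New(2) // toy capacity; the real cache used maxReasonCacheEntries (200)

	c.Add("podA-uid_nginx", "ErrRunContainer")
	c.Add("podB-uid_nginx", "ErrImagePull")
	c.Add("podC-uid_nginx", "CrashLoopBackOff") // exceeds capacity, evicts podA's entry

	_, ok := c.Get("podA-uid_nginx")
	fmt.Println(ok) // false: the reason was recycled even though podA may still exist
}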
@@ -185,8 +174,6 @@ func NewDockerManager(
		glog.Infof("Setting dockerRoot to %s", dockerRoot)
	}
	reasonCache := reasonInfoCache{cache: lru.New(maxReasonCacheEntries)}
	dm := &DockerManager{
		client: client,
		recorder: recorder,
@@ -194,7 +181,6 @@ func NewDockerManager(
		os: osInterface,
		machineInfo: machineInfo,
		podInfraContainerImage: podInfraContainerImage,
		reasonCache: reasonCache,
		dockerPuller: newDockerPuller(client, qps, burst),
		dockerRoot: dockerRoot,
		containerLogsDir: containerLogsDir,
@@ -218,43 +204,6 @@ func NewDockerManager(
	return dm
}
// A cache which stores strings keyed by <pod_UID>_<container_name>.
type reasonInfoCache struct {
	lock  sync.RWMutex
	cache *lru.Cache
}

type reasonInfo struct {
	reason  string
	message string
}

func (sc *reasonInfoCache) composeKey(uid types.UID, name string) string {
	return fmt.Sprintf("%s_%s", uid, name)
}

func (sc *reasonInfoCache) Add(uid types.UID, name string, reason, message string) {
	sc.lock.Lock()
	defer sc.lock.Unlock()
	sc.cache.Add(sc.composeKey(uid, name), reasonInfo{reason, message})
}

func (sc *reasonInfoCache) Remove(uid types.UID, name string) {
	sc.lock.Lock()
	defer sc.lock.Unlock()
	sc.cache.Remove(sc.composeKey(uid, name))
}

func (sc *reasonInfoCache) Get(uid types.UID, name string) (reasonInfo, bool) {
	sc.lock.RLock()
	defer sc.lock.RUnlock()
	value, ok := sc.cache.Get(sc.composeKey(uid, name))
	if ok {
		return value.(reasonInfo), ok
	} else {
		return reasonInfo{"", ""}, ok
	}
}
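Aside (not part of this change): a hypothetical usage sketch of the reasonInfoCache removed above, as it would have been called from inside this package before the change; the pod UID and container name are invented for illustration.

rc := reasonInfoCache{cache: lru.New(maxReasonCacheEntries)}

// Record why the last start of "nginx" in this pod failed.
rc.Add("11111111-2222-3333-4444-555555555555", "nginx", "ErrRunContainer", "exit status 1")

// Later, when building the container status, look the reason back up.
if info, ok := rc.Get("11111111-2222-3333-4444-555555555555", "nginx"); ok {
	fmt.Printf("reason=%q message=%q\n", info.reason, info.message)
}

// On a successful start the entry is dropped again (what clearReasonCache did).
rc.Remove("11111111-2222-3333-4444-555555555555", "nginx")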
// GetContainerLogs returns logs of a specific container. By
// default, it returns a snapshot of the container log. Set 'follow' to true to
// stream the log. Set 'follow' to false and specify the number of lines (e.g.
@@ -1682,20 +1631,6 @@ func (dm *DockerManager) computePodContainerChanges(pod *api.Pod, podStatus *kub
	}, nil
}
// updateReasonCache updates the failure reason based on the registered error.
func (dm *DockerManager) updateReasonCache(pod *api.Pod, container *api.Container, briefError string, err error) {
	if briefError == "" || err == nil {
		return
	}
	errString := err.Error()
	dm.reasonCache.Add(pod.UID, container.Name, briefError, errString)
}

// clearReasonCache removes the entry in the reason cache.
func (dm *DockerManager) clearReasonCache(pod *api.Pod, container *api.Container) {
	dm.reasonCache.Remove(pod.UID, container.Name)
}
// Sync the running pod to match the specified desired pod.
func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubecontainer.PodStatus, pullSecrets []api.Secret, backOff *util.Backoff) (result kubecontainer.PodSyncResult) {
	start := time.Now()
@@ -1836,13 +1771,11 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
		err, msg := dm.imagePuller.PullImage(pod, container, pullSecrets)
		if err != nil {
			startContainerResult.Fail(err, msg)
			dm.updateReasonCache(pod, container, err.Error(), errors.New(msg))
			continue
		}
		if container.SecurityContext != nil && container.SecurityContext.RunAsNonRoot != nil && *container.SecurityContext.RunAsNonRoot {
			err := dm.verifyNonRoot(container)
			dm.updateReasonCache(pod, container, kubecontainer.ErrVerifyNonRoot.Error(), err)
			if err != nil {
				startContainerResult.Fail(kubecontainer.ErrVerifyNonRoot, err.Error())
				glog.Errorf("Error running pod %q container %q: %v", format.Pod(pod), container.Name, err)
@@ -1863,7 +1796,6 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
		// See createPodInfraContainer for infra container setup.
		namespaceMode := fmt.Sprintf("container:%v", podInfraContainerID)
		_, err = dm.runContainerInPod(pod, container, namespaceMode, namespaceMode, getPidMode(pod), restartCount)
		dm.updateReasonCache(pod, container, kubecontainer.ErrRunContainer.Error(), err)
		if err != nil {
			startContainerResult.Fail(kubecontainer.ErrRunContainer, err.Error())
			// TODO(bburns) : Perhaps blacklist a container after N failures?
@@ -1871,8 +1803,6 @@ func (dm *DockerManager) SyncPod(pod *api.Pod, _ api.PodStatus, podStatus *kubec
			continue
		}
		// Successfully started the container; clear the entry in the failure
		// reason cache.
		dm.clearReasonCache(pod, container)
	}
	return
}
@@ -1955,14 +1885,12 @@ func (dm *DockerManager) doBackOff(pod *api.Pod, container *api.Container, podSt
				dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.BackOffStartContainer, "Back-off restarting failed docker container")
			}
			err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(stableName), container.Name, format.Pod(pod))
			dm.updateReasonCache(pod, container, kubecontainer.ErrCrashLoopBackOff.Error(), err)
			glog.Infof("%s", err.Error())
			return true, kubecontainer.ErrCrashLoopBackOff, err.Error()
		}
		backOff.Next(stableName, ts)
	}
	dm.clearReasonCache(pod, container)
	return false, nil, ""
}