Merge pull request #4327 from a-robinson/timecache

Remove the unused TimeCache file and its tests.
Commit 15c57efde2 by roberthbailey, 2015-02-11 11:46:51 -08:00
2 changed files with 0 additions and 316 deletions


@@ -1,133 +0,0 @@
/*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"sync"
"time"
)
// T stands in for any type in TimeCache.
// Keeping it as a named type should make it easy to use this file as a
// template for an autogenerator, if we ever start doing that.
type T interface{}
type TimeCache interface {
// Get will fetch an item from the cache if
// it is present and recent enough.
Get(key string) T
}
type timeCacheEntry struct {
item T
lastUpdate time.Time
}
type timeCache struct {
clock Clock
fillFunc func(string) T
ttl time.Duration
inFlight map[string]chan T
inFlightLock sync.Mutex
cache map[string]timeCacheEntry
lock sync.RWMutex
}
// NewTimeCache returns a cache which calls fill to fill its entries, and
// forgets entries after ttl has passed.
func NewTimeCache(clock Clock, ttl time.Duration, fill func(key string) T) TimeCache {
return &timeCache{
clock: clock,
fillFunc: fill,
inFlight: map[string]chan T{},
cache: map[string]timeCacheEntry{},
ttl: ttl,
}
}
// Get returns the value of key from the cache, if it is present
// and recent enough; otherwise, it blocks while it gets the value.
func (c *timeCache) Get(key string) T {
if item, ok := c.get(key); ok {
return item
}
// We need to fill the cache. Calling the function could be
// expensive, so do it while unlocked.
wait := c.fillOrWait(key)
item := <-wait
// Put the item back in the channel in case there are multiple waiters
// (the channel is buffered, so this send will not block).
wait <- item
return item
}
// get returns the item and true if it is found and not expired; otherwise nil and false.
// If it returns false, it has locked c.inFlightLock and it is the caller's
// responsibility to unlock it.
func (c *timeCache) get(key string) (T, bool) {
c.lock.RLock()
defer c.lock.RUnlock()
data, ok := c.cache[key]
now := c.clock.Now()
if !ok || now.Sub(data.lastUpdate) > c.ttl {
// We must lock this while we hold c.lock-- otherwise, a writer could
// write to c.cache and remove the channel from c.inFlight before we
// manage to read c.inFlight.
c.inFlightLock.Lock()
return nil, false
}
return data.item, true
}
// c.inFlightLock MUST be locked before calling this. fillOrWait will unlock it.
func (c *timeCache) fillOrWait(key string) chan T {
defer c.inFlightLock.Unlock()
// Already a call in progress?
if current, ok := c.inFlight[key]; ok {
return current
}
// We are the first, so we have to make the call.
result := make(chan T, 1) // non-blocking
c.inFlight[key] = result
go func() {
// Make potentially slow call.
// While this call is in flight, fillOrWait will
// presumably exit.
data := timeCacheEntry{
item: c.fillFunc(key),
lastUpdate: c.clock.Now(),
}
result <- data.item
// Store in cache
c.lock.Lock()
defer c.lock.Unlock()
c.cache[key] = data
// Remove in flight entry
c.inFlightLock.Lock()
defer c.inFlightLock.Unlock()
delete(c.inFlight, key)
}()
return result
}
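
For reference, here is a minimal usage sketch (not part of the diff) of how the removed cache was intended to be used. It relies on the package's RealClock type, which the tests below pass as RealClock{}; the lookup fill function here is hypothetical.

package util

import "time"

// exampleTimeCacheUsage is illustrative only and is not part of the removed
// files. "lookup" stands in for any expensive fill function.
func exampleTimeCacheUsage() {
	lookup := func(key string) T {
		// Stand-in for an expensive call, e.g. a remote API lookup.
		return "record for " + key
	}
	c := NewTimeCache(RealClock{}, 30*time.Second, lookup)

	// The first Get for a key blocks while lookup runs; concurrent Gets for
	// the same key share that single in-flight call.
	first := c.Get("alice").(string)

	// A second Get within the 30-second TTL is served from the cache and
	// does not invoke lookup again.
	second := c.Get("alice").(string)
	_, _ = first, second
}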


@@ -1,183 +0,0 @@
/*
Copyright 2015 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"math/rand"
"runtime"
"sync"
"testing"
"time"
fuzz "github.com/google/gofuzz"
)
func TestCacheExpire(t *testing.T) {
calls := map[string]int{}
ff := func(key string) T { calls[key]++; return key }
clock := &FakeClock{time.Now()}
c := NewTimeCache(clock, 60*time.Second, ff)
c.Get("foo")
c.Get("bar")
// This call should hit the cache, so we expect no additional calls
c.Get("foo")
// Advance the clock; these calls should miss the cache, so expect one more call for each key.
clock.Time = clock.Time.Add(61 * time.Second)
c.Get("foo")
c.Get("bar")
if e, a := 2, calls["foo"]; e != a {
t.Errorf("Wrong number of calls for foo: wanted %v, got %v", e, a)
}
if e, a := 2, calls["bar"]; e != a {
t.Errorf("Wrong number of calls for bar: wanted %v, got %v", e, a)
}
}
func TestCacheNotExpire(t *testing.T) {
calls := map[string]int{}
ff := func(key string) T { calls[key]++; return key }
clock := &FakeClock{time.Now()}
c := NewTimeCache(clock, 60*time.Second, ff)
c.Get("foo")
// Advancing the clock by exactly the TTL does not expire the entry,
// so this call should hit the cache and we expect no additional fill calls.
clock.Time = clock.Time.Add(60 * time.Second)
c.Get("foo")
if e, a := 1, calls["foo"]; e != a {
t.Errorf("Wrong number of calls for foo: wanted %v, got %v", e, a)
}
}
func TestCacheParallel(t *testing.T) {
ff := func(key string) T { time.Sleep(time.Second); return key }
clock := &FakeClock{time.Now()}
c := NewTimeCache(clock, 60*time.Second, ff)
// Make some keys
keys := []string{}
fuzz.New().NilChance(0).NumElements(50, 50).Fuzz(&keys)
// If we have high parallelism, this will take only a second.
var wg sync.WaitGroup
wg.Add(len(keys))
for _, key := range keys {
go func(key string) {
c.Get(key)
wg.Done()
}(key)
}
wg.Wait()
}
func TestCacheParallelOneCall(t *testing.T) {
calls := 0
var callLock sync.Mutex
ff := func(key string) T {
time.Sleep(time.Second)
callLock.Lock()
defer callLock.Unlock()
calls++
return key
}
clock := &FakeClock{time.Now()}
c := NewTimeCache(clock, 60*time.Second, ff)
// If we have high parallelism, this will take only a second.
var wg sync.WaitGroup
wg.Add(50)
for i := 0; i < 50; i++ {
go func(key string) {
c.Get(key)
wg.Done()
}("aoeu")
}
wg.Wait()
// Because concurrent Gets for the same key share a single in-flight call,
// the fill function should have been called exactly once.
if e, a := 1, calls; e != a {
t.Errorf("Expected %v, got %v", e, a)
}
}
func TestCacheParallelNoDeadlocksNoDoubleCalls(t *testing.T) {
// Make 50 random keys
keys := []string{}
fuzz.New().NilChance(0).NumElements(50, 50).Fuzz(&keys)
// Data structure for tracking when each key is accessed.
type callTrack struct {
sync.Mutex
accessTimes []time.Time
}
calls := map[string]*callTrack{}
for _, k := range keys {
calls[k] = &callTrack{}
}
// This is called to fill the cache in the case of a cache miss
// or cache entry expiration. We record the time.
ff := func(key string) T {
ct := calls[key]
ct.Lock()
ct.accessTimes = append(ct.accessTimes, time.Now())
ct.Unlock()
// make sure that there is time for multiple requests to come in
// for the same key before this returns.
time.Sleep(time.Millisecond)
return key
}
cacheDur := 10 * time.Millisecond
c := NewTimeCache(RealClock{}, cacheDur, ff)
// Spawn a bunch of goroutines, each of which sequentially requests
// 500 random keys from the cache.
runtime.GOMAXPROCS(16)
var wg sync.WaitGroup
for i := 0; i < 500; i++ {
wg.Add(1)
go func(seed int64) {
r := rand.New(rand.NewSource(seed))
for i := 0; i < 500; i++ {
c.Get(keys[r.Intn(len(keys))])
}
wg.Done()
}(rand.Int63())
}
wg.Wait()
// Since the cache should hold things for 10ms, no calls for a given key
// should be more closely spaced than that.
for k, ct := range calls {
if len(ct.accessTimes) < 2 {
continue
}
cur := ct.accessTimes[0]
for i := 1; i < len(ct.accessTimes); i++ {
next := ct.accessTimes[i]
if next.Sub(cur) < cacheDur {
t.Errorf("%v was called at %v and %v", k, cur, next)
}
cur = next
}
}
}
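
Clock, RealClock, and FakeClock are referenced by the removed files but defined elsewhere in the util package, so they do not appear in this diff. Judging from how they are used here (clock.Now(), RealClock{}, and &FakeClock{time.Now()} whose Time field the tests advance), they look roughly like this sketch:

package util

import "time"

// Clock abstracts time.Now so tests can control the flow of time.
type Clock interface {
	Now() time.Time
}

// RealClock reads the real wall clock.
type RealClock struct{}

func (RealClock) Now() time.Time { return time.Now() }

// FakeClock returns a fixed time that tests advance by mutating Time.
type FakeClock struct {
	Time time.Time
}

func (f *FakeClock) Now() time.Time { return f.Time }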