From eace46906512b99c23ad9635edc2ea055363a602 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 Jun 2022 19:40:58 -0700 Subject: [PATCH 1/4] Add a benchmark for the discovery cache RoundTripper This benchmark is intended to demonstrate a performance improvement gained by removing fsyncs. Refer to the issue below for more detail. https://github.com/kubernetes/kubernetes/issues/110753 Signed-off-by: Nic Cope --- .../cached/disk/round_tripper_test.go | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go index 13002c63d22..4b1d1144bd3 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go @@ -25,6 +25,8 @@ import ( "path/filepath" "testing" + "github.com/gregjones/httpcache/diskcache" + "github.com/peterbourgon/diskv" "github.com/stretchr/testify/assert" ) @@ -40,6 +42,38 @@ func (rt *testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) return rt.Response, rt.Err } +// NOTE(negz): We're adding a benchmark for an external dependency in order to +// prove that one that will be added in a subsequent commit improves write +// performance. +func BenchmarkDiskCache(b *testing.B) { + cacheDir, err := ioutil.TempDir("", "cache-rt") + if err != nil { + b.Fatal(err) + } + defer os.RemoveAll(cacheDir) + + d := diskv.New(diskv.Options{ + PathPerm: os.FileMode(0750), + FilePerm: os.FileMode(0660), + BasePath: cacheDir, + TempDir: filepath.Join(cacheDir, ".diskv-temp"), + }) + + k := "localhost:8080/apis/batch/v1.json" + v, err := ioutil.ReadFile("../../testdata/apis/batch/v1.json") + if err != nil { + b.Fatal(err) + } + + c := diskcache.NewWithDiskv(d) + + for n := 0; n < b.N; n++ { + c.Set(k, v) + c.Get(k) + c.Delete(k) + } +} + func TestCacheRoundTripper(t *testing.T) { rt := &testRoundTripper{} cacheDir, err := ioutil.TempDir("", "cache-rt") From 7a2c6a432f9e8db8b84abe5607843429f8bff417 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 28 Jun 2022 19:15:49 -0700 Subject: [PATCH 2/4] Use checksums instead of fsyncs to manage discovery cache corruption Part of the API discovery cache uses an HTTP RoundTripper that transparently caches responses to disk. The upstream implementation of the disk cache is hard-coded to call Sync() on every file it writes. This has noticeably poor performance on modern Macs, which ask their disk controllers to flush all the way to persistent storage because Go uses the `F_FULLFSYNC` fcntl. Apple recommends minimizing this behaviour to avoid degrading performance and increasing disk wear. The content of the discovery cache is not critical; it is just a cache and can be recreated by hitting the API servers' discovery endpoints. This commit replaces upstream httpcache's diskcache implementation with a similar implementation that uses CRC-32 checksums to detect corrupted cache entries at read time. When such an entry is detected (e.g. because it was only partially flushed to permanent storage before the host lost power), the cache will report a miss. This causes httpcache to fall back to its underlying HTTP transport (i.e. the real API server) and re-cache the resulting value.
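To illustrate the entry format this commit introduces, here is a minimal standalone sketch (illustrative only; encodeEntry and decodeEntry are hypothetical helpers, not part of the patch, which instead wraps a diskv-backed httpcache cache below). Each entry stores a varint-encoded CRC-32 of the response in its first binary.MaxVarintLen32 bytes, followed by the response bytes, and anything that fails verification reads as a miss.

    package main

    import (
        "encoding/binary"
        "fmt"
        "hash/crc32"
    )

    // encodeEntry prefixes the response bytes with a varint-encoded CRC-32 checksum.
    func encodeEntry(response []byte) []byte {
        sum := make([]byte, binary.MaxVarintLen32)
        binary.PutUvarint(sum, uint64(crc32.ChecksumIEEE(response)))
        return append(sum, response...)
    }

    // decodeEntry returns the response bytes, or false (a cache miss) if the
    // entry is too short or its checksum doesn't match.
    func decodeEntry(b []byte) ([]byte, bool) {
        if len(b) < binary.MaxVarintLen32 {
            return nil, false
        }
        response := b[binary.MaxVarintLen32:]
        sum, _ := binary.Uvarint(b[:binary.MaxVarintLen32])
        if crc32.ChecksumIEEE(response) != uint32(sum) {
            return nil, false // Treat a corrupt entry as a miss so it gets re-fetched.
        }
        return response, true
    }

    func main() {
        entry := encodeEntry([]byte(`{"kind":"APIResourceList"}`)) // Hypothetical payload.
        entry[len(entry)-1] ^= 0xff                                // Simulate a partially flushed write.
        _, ok := decodeEntry(entry)
        fmt.Println(ok) // false: the corrupted entry simply reads as a cache miss.
    }
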
Apart from adding CRC-32 checksums and avoiding calling fsync this implementation differs from upstream httpcache's diskcache package in that it uses FNV-32a hashes rather than MD5 hashes of cache keys in order to generate filenames. Signed-off-by: Nic Cope --- .../discovery/cached/disk/round_tripper.go | 58 ++++++- .../cached/disk/round_tripper_test.go | 152 +++++++++++++++++- .../httpcache/diskcache/diskcache.go | 61 ------- vendor/modules.txt | 1 - 4 files changed, 203 insertions(+), 69 deletions(-) delete mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go index bda2e5cf4ad..06d82627b01 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go @@ -17,12 +17,15 @@ limitations under the License. package disk import ( + "encoding/binary" + "fmt" + "hash/crc32" + "hash/fnv" "net/http" "os" "path/filepath" "github.com/gregjones/httpcache" - "github.com/gregjones/httpcache/diskcache" "github.com/peterbourgon/diskv" "k8s.io/klog/v2" ) @@ -41,7 +44,7 @@ func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripp BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp"), }) - t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) + t := httpcache.NewTransport(&crcDiskCache{disk: d}) t.Transport = rt return &cacheRoundTripper{rt: t} @@ -63,3 +66,54 @@ func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { } func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } + +// A crcDiskCache is a cache backend for github.com/gregjones/httpcache. It is +// similar to httpcache's diskcache package, but uses checksums to ensure cache +// integrity rather than fsyncing each cache entry in order to avoid performance +// degradation on MacOS. +// +// See https://github.com/kubernetes/kubernetes/issues/110753 for more. +type crcDiskCache struct { + disk *diskv.Diskv +} + +// Get the requested key from the cache on disk. If Get encounters an error, or +// the returned value is not a CRC-32 checksum followed by bytes with a matching +// checksum it will return false to indicate a cache miss. +func (c *crcDiskCache) Get(key string) ([]byte, bool) { + b, err := c.disk.Read(sanitize(key)) + if err != nil || len(b) < binary.MaxVarintLen32 { + return []byte{}, false + } + + response := b[binary.MaxVarintLen32:] + sum, _ := binary.Uvarint(b[:binary.MaxVarintLen32]) + if crc32.ChecksumIEEE(response) != uint32(sum) { + return []byte{}, false + } + + return response, true +} + +// Set writes the response to a file on disk. The filename will be the FNV-32a +// hash of the key. The file will contain the CRC-32 checksum of the response +// bytes, followed by said response bytes. +func (c *crcDiskCache) Set(key string, response []byte) { + sum := make([]byte, binary.MaxVarintLen32) + _ = binary.PutUvarint(sum, uint64(crc32.ChecksumIEEE(response))) + _ = c.disk.Write(sanitize(key), append(sum, response...)) // Nothing we can do with this error. +} + +func (c *crcDiskCache) Delete(key string) { + _ = c.disk.Erase(sanitize(key)) // Nothing we can do with this error. +} + +// Sanitize an httpcache key such that it can be used as a diskv key, which must +// be a valid filename. 
The httpcache key will either be the requested URL (if +// the request method was GET) or " " for other methods, per the +// httpcache.cacheKey function. +func sanitize(key string) string { + h := fnv.New32a() + _, _ = h.Write([]byte(key)) // Writing to a hash never returns an error. + return fmt.Sprintf("%X", h.Sum32()) +} diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go index 4b1d1144bd3..245d83b6053 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go @@ -18,6 +18,8 @@ package disk import ( "bytes" + "encoding/binary" + "hash/crc32" "io/ioutil" "net/http" "net/url" @@ -25,7 +27,6 @@ import ( "path/filepath" "testing" - "github.com/gregjones/httpcache/diskcache" "github.com/peterbourgon/diskv" "github.com/stretchr/testify/assert" ) @@ -42,9 +43,6 @@ func (rt *testRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) return rt.Response, rt.Err } -// NOTE(negz): We're adding a benchmark for an external dependency in order to -// prove that one that will be added in a subsequent commit improves write -// performance. func BenchmarkDiskCache(b *testing.B) { cacheDir, err := ioutil.TempDir("", "cache-rt") if err != nil { @@ -65,7 +63,7 @@ func BenchmarkDiskCache(b *testing.B) { b.Fatal(err) } - c := diskcache.NewWithDiskv(d) + c := crcDiskCache{disk: d} for n := 0; n < b.N; n++ { c.Set(k, v) @@ -179,3 +177,147 @@ func TestCacheRoundTripperPathPerm(t *testing.T) { }) assert.NoError(err) } + +func TestCRCDiskCache(t *testing.T) { + assert := assert.New(t) + + // Ensure that we'll return a cache miss if the backing file doesn't exist. + t.Run("NoSuchKey", func(t *testing.T) { + cacheDir, err := ioutil.TempDir("", "cache-crc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cacheDir) + d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) + crc := &crcDiskCache{disk: d} + + key := "testing" + + got, ok := crc.Get(key) + assert.False(ok) + assert.Equal([]byte{}, got) + }) + + // Ensure that we'll return a cache miss if the backing file is empty. + t.Run("EmptyFile", func(t *testing.T) { + cacheDir, err := ioutil.TempDir("", "cache-crc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cacheDir) + d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) + crc := &crcDiskCache{disk: d} + + key := "testing" + + f, err := os.Create(filepath.Join(cacheDir, sanitize(key))) + if err != nil { + t.Fatal(err) + } + f.Close() + + got, ok := crc.Get(key) + assert.False(ok) + assert.Equal([]byte{}, got) + }) + + // Ensure that we'll return a cache miss if the backing has an invalid + // checksum. + t.Run("InvalidChecksum", func(t *testing.T) { + cacheDir, err := ioutil.TempDir("", "cache-crc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cacheDir) + d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) + crc := &crcDiskCache{disk: d} + + key := "testing" + value := []byte("testing") + mismatchedValue := []byte("testink") + sum := make([]byte, binary.MaxVarintLen32) + binary.PutUvarint(sum, uint64(crc32.ChecksumIEEE(value))) + + // Create a file with the checksum of 'value' followed by the bytes of + // 'mismatchedValue'. 
+ f, err := os.Create(filepath.Join(cacheDir, sanitize(key))) + if err != nil { + t.Fatal(err) + } + f.Write(sum) + f.Write(mismatchedValue) + f.Close() + + // The mismatched checksum should result in a cache miss. + got, ok := crc.Get(key) + assert.False(ok) + assert.Equal([]byte{}, got) + }) + + // Ensure that our disk cache will happily cache over the top of an existing + // value. We depend on this behaviour to recover from corrupted cache + // entries. When Get detects a bad checksum it will return a cache miss. + // This should cause httpcache to fall back to its underlying transport and + // to subsequently cache the new value, overwriting the corrupt one. + t.Run("OverwriteExistingKey", func(t *testing.T) { + cacheDir, err := ioutil.TempDir("", "cache-crc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cacheDir) + d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) + crc := &crcDiskCache{disk: d} + + key := "testing" + value := []byte("cool value!") + + // Write a value. + crc.Set(key, value) + got, ok := crc.Get(key) + + // Ensure we can read back what we wrote. + assert.True(ok) + assert.Equal(value, got) + + differentValue := []byte("I'm different!") + + // Write a different value. + crc.Set(key, differentValue) + got, ok = crc.Get(key) + + // Ensure we can read back the different value. + assert.True(ok) + assert.Equal(differentValue, got) + }) + + // Ensure that deleting a key does in fact delete it. + t.Run("DeleteKey", func(t *testing.T) { + cacheDir, err := ioutil.TempDir("", "cache-crc") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(cacheDir) + d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) + crc := &crcDiskCache{disk: d} + + key := "testing" + value := []byte("coolValue") + + crc.Set(key, value) + + // Ensure we successfully set the value. + got, ok := crc.Get(key) + assert.True(ok) + assert.Equal(value, got) + + crc.Delete(key) + + // Ensure the value is gone. + got, ok = crc.Get(key) + assert.False(ok) + assert.Equal([]byte{}, got) + + // Ensure that deleting a non-existent value is a no-op. 
+ crc.Delete(key) + }) +} diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go deleted file mode 100644 index 42e3129d823..00000000000 --- a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package -// to supplement an in-memory map with persistent storage -// -package diskcache - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "github.com/peterbourgon/diskv" - "io" -) - -// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage -type Cache struct { - d *diskv.Diskv -} - -// Get returns the response corresponding to key if present -func (c *Cache) Get(key string) (resp []byte, ok bool) { - key = keyToFilename(key) - resp, err := c.d.Read(key) - if err != nil { - return []byte{}, false - } - return resp, true -} - -// Set saves a response to the cache as key -func (c *Cache) Set(key string, resp []byte) { - key = keyToFilename(key) - c.d.WriteStream(key, bytes.NewReader(resp), true) -} - -// Delete removes the response with key from the cache -func (c *Cache) Delete(key string) { - key = keyToFilename(key) - c.d.Erase(key) -} - -func keyToFilename(key string) string { - h := md5.New() - io.WriteString(h, key) - return hex.EncodeToString(h.Sum(nil)) -} - -// New returns a new Cache that will store files in basePath -func New(basePath string) *Cache { - return &Cache{ - d: diskv.New(diskv.Options{ - BasePath: basePath, - CacheSizeMax: 100 * 1024 * 1024, // 100MB - }), - } -} - -// NewWithDiskv returns a new Cache using the provided Diskv as underlying -// storage. -func NewWithDiskv(d *diskv.Diskv) *Cache { - return &Cache{d} -} diff --git a/vendor/modules.txt b/vendor/modules.txt index c55eba077cb..e2f2dc5ca71 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -507,7 +507,6 @@ github.com/gorilla/websocket # github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 => github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 ## explicit github.com/gregjones/httpcache -github.com/gregjones/httpcache/diskcache # github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 => github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 ## explicit; go 1.14 github.com/grpc-ecosystem/go-grpc-middleware From 288a17fd337c65cb5aea44e44ecb74e9cb8088f5 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Thu, 14 Jul 2022 14:11:33 -0700 Subject: [PATCH 3/4] Use sha256 to sanitize discovery HTTP cache keys This helps avoid (potentially malicious) collisions when reading and writing cache data. Signed-off-by: Nic Cope --- .../client-go/discovery/cached/disk/round_tripper.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go index 06d82627b01..a7cea6e7402 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go @@ -17,10 +17,10 @@ limitations under the License. package disk import ( + "crypto/sha256" "encoding/binary" "fmt" "hash/crc32" - "hash/fnv" "net/http" "os" "path/filepath" @@ -95,7 +95,7 @@ func (c *crcDiskCache) Get(key string) ([]byte, bool) { return response, true } -// Set writes the response to a file on disk. 
The filename will be the FNV-32a +// Set writes the response to a file on disk. The filename will be the SHA256 // hash of the key. The file will contain the CRC-32 checksum of the response // bytes, followed by said response bytes. func (c *crcDiskCache) Set(key string, response []byte) { @@ -113,7 +113,8 @@ func (c *crcDiskCache) Delete(key string) { // the request method was GET) or " " for other methods, per the // httpcache.cacheKey function. func sanitize(key string) string { - h := fnv.New32a() - _, _ = h.Write([]byte(key)) // Writing to a hash never returns an error. - return fmt.Sprintf("%X", h.Sum32()) + // These keys are not sensitive. We use sha256 to avoid a (potentially + // malicious) collision causing the wrong cache data to be written or + // accessed. + return fmt.Sprintf("%x", sha256.Sum256([]byte(key))) } From c5957c284e1d23bdadc98fbbe2bb481fc1f345d4 Mon Sep 17 00:00:00 2001 From: Nic Cope Date: Tue, 26 Jul 2022 23:51:01 -0700 Subject: [PATCH 4/4] Use SHA256 sums to verify discovery cache integrity This is a little more computationally expensive but reduces the likelihood of a potentially malicious cache collision. Signed-off-by: Nic Cope --- .../discovery/cached/disk/round_tripper.go | 42 +++++++------- .../cached/disk/round_tripper_test.go | 58 +++++++++---------- 2 files changed, 49 insertions(+), 51 deletions(-) diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go index a7cea6e7402..f3a4b2947f3 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper.go @@ -17,10 +17,9 @@ limitations under the License. package disk import ( + "bytes" "crypto/sha256" - "encoding/binary" "fmt" - "hash/crc32" "net/http" "os" "path/filepath" @@ -44,7 +43,7 @@ func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripp BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp"), }) - t := httpcache.NewTransport(&crcDiskCache{disk: d}) + t := httpcache.NewTransport(&sumDiskCache{disk: d}) t.Transport = rt return &cacheRoundTripper{rt: t} @@ -67,28 +66,30 @@ func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } -// A crcDiskCache is a cache backend for github.com/gregjones/httpcache. It is -// similar to httpcache's diskcache package, but uses checksums to ensure cache -// integrity rather than fsyncing each cache entry in order to avoid performance -// degradation on MacOS. +// A sumDiskCache is a cache backend for github.com/gregjones/httpcache. It is +// similar to httpcache's diskcache package, but uses SHA256 sums to ensure +// cache integrity at read time rather than fsyncing each cache entry to +// increase the likelihood they will be persisted at write time. This avoids +// significant performance degradation on MacOS. // // See https://github.com/kubernetes/kubernetes/issues/110753 for more. -type crcDiskCache struct { +type sumDiskCache struct { disk *diskv.Diskv } // Get the requested key from the cache on disk. If Get encounters an error, or -// the returned value is not a CRC-32 checksum followed by bytes with a matching +// the returned value is not a SHA256 sum followed by bytes with a matching // checksum it will return false to indicate a cache miss. 
-func (c *crcDiskCache) Get(key string) ([]byte, bool) { +func (c *sumDiskCache) Get(key string) ([]byte, bool) { b, err := c.disk.Read(sanitize(key)) - if err != nil || len(b) < binary.MaxVarintLen32 { + if err != nil || len(b) < sha256.Size { return []byte{}, false } - response := b[binary.MaxVarintLen32:] - sum, _ := binary.Uvarint(b[:binary.MaxVarintLen32]) - if crc32.ChecksumIEEE(response) != uint32(sum) { + response := b[sha256.Size:] + want := b[:sha256.Size] // The first 32 bytes of the file should be the SHA256 sum. + got := sha256.Sum256(response) + if !bytes.Equal(want, got[:]) { return []byte{}, false } @@ -96,15 +97,14 @@ func (c *crcDiskCache) Get(key string) ([]byte, bool) { } // Set writes the response to a file on disk. The filename will be the SHA256 -// hash of the key. The file will contain the CRC-32 checksum of the response -// bytes, followed by said response bytes. -func (c *crcDiskCache) Set(key string, response []byte) { - sum := make([]byte, binary.MaxVarintLen32) - _ = binary.PutUvarint(sum, uint64(crc32.ChecksumIEEE(response))) - _ = c.disk.Write(sanitize(key), append(sum, response...)) // Nothing we can do with this error. +// sum of the key. The file will contain a SHA256 sum of the response bytes, +// followed by said response bytes. +func (c *sumDiskCache) Set(key string, response []byte) { + s := sha256.Sum256(response) + _ = c.disk.Write(sanitize(key), append(s[:], response...)) // Nothing we can do with this error. } -func (c *crcDiskCache) Delete(key string) { +func (c *sumDiskCache) Delete(key string) { _ = c.disk.Erase(sanitize(key)) // Nothing we can do with this error. } diff --git a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go index 245d83b6053..5f1626c9aad 100644 --- a/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go +++ b/staging/src/k8s.io/client-go/discovery/cached/disk/round_tripper_test.go @@ -18,8 +18,7 @@ package disk import ( "bytes" - "encoding/binary" - "hash/crc32" + "crypto/sha256" "io/ioutil" "net/http" "net/url" @@ -63,7 +62,7 @@ func BenchmarkDiskCache(b *testing.B) { b.Fatal(err) } - c := crcDiskCache{disk: d} + c := sumDiskCache{disk: d} for n := 0; n < b.N; n++ { c.Set(k, v) @@ -178,35 +177,35 @@ func TestCacheRoundTripperPathPerm(t *testing.T) { assert.NoError(err) } -func TestCRCDiskCache(t *testing.T) { +func TestSumDiskCache(t *testing.T) { assert := assert.New(t) // Ensure that we'll return a cache miss if the backing file doesn't exist. t.Run("NoSuchKey", func(t *testing.T) { - cacheDir, err := ioutil.TempDir("", "cache-crc") + cacheDir, err := ioutil.TempDir("", "cache-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(cacheDir) d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) - crc := &crcDiskCache{disk: d} + c := &sumDiskCache{disk: d} key := "testing" - got, ok := crc.Get(key) + got, ok := c.Get(key) assert.False(ok) assert.Equal([]byte{}, got) }) // Ensure that we'll return a cache miss if the backing file is empty. 
t.Run("EmptyFile", func(t *testing.T) { - cacheDir, err := ioutil.TempDir("", "cache-crc") + cacheDir, err := ioutil.TempDir("", "cache-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(cacheDir) d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) - crc := &crcDiskCache{disk: d} + c := &sumDiskCache{disk: d} key := "testing" @@ -216,7 +215,7 @@ func TestCRCDiskCache(t *testing.T) { } f.Close() - got, ok := crc.Get(key) + got, ok := c.Get(key) assert.False(ok) assert.Equal([]byte{}, got) }) @@ -224,32 +223,31 @@ func TestCRCDiskCache(t *testing.T) { // Ensure that we'll return a cache miss if the backing has an invalid // checksum. t.Run("InvalidChecksum", func(t *testing.T) { - cacheDir, err := ioutil.TempDir("", "cache-crc") + cacheDir, err := ioutil.TempDir("", "cache-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(cacheDir) d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) - crc := &crcDiskCache{disk: d} + c := &sumDiskCache{disk: d} key := "testing" value := []byte("testing") mismatchedValue := []byte("testink") - sum := make([]byte, binary.MaxVarintLen32) - binary.PutUvarint(sum, uint64(crc32.ChecksumIEEE(value))) + sum := sha256.Sum256(value) - // Create a file with the checksum of 'value' followed by the bytes of + // Create a file with the sum of 'value' followed by the bytes of // 'mismatchedValue'. f, err := os.Create(filepath.Join(cacheDir, sanitize(key))) if err != nil { t.Fatal(err) } - f.Write(sum) + f.Write(sum[:]) f.Write(mismatchedValue) f.Close() // The mismatched checksum should result in a cache miss. - got, ok := crc.Get(key) + got, ok := c.Get(key) assert.False(ok) assert.Equal([]byte{}, got) }) @@ -260,20 +258,20 @@ func TestCRCDiskCache(t *testing.T) { // This should cause httpcache to fall back to its underlying transport and // to subsequently cache the new value, overwriting the corrupt one. t.Run("OverwriteExistingKey", func(t *testing.T) { - cacheDir, err := ioutil.TempDir("", "cache-crc") + cacheDir, err := ioutil.TempDir("", "cache-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(cacheDir) d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) - crc := &crcDiskCache{disk: d} + c := &sumDiskCache{disk: d} key := "testing" value := []byte("cool value!") // Write a value. - crc.Set(key, value) - got, ok := crc.Get(key) + c.Set(key, value) + got, ok := c.Get(key) // Ensure we can read back what we wrote. assert.True(ok) @@ -282,8 +280,8 @@ func TestCRCDiskCache(t *testing.T) { differentValue := []byte("I'm different!") // Write a different value. - crc.Set(key, differentValue) - got, ok = crc.Get(key) + c.Set(key, differentValue) + got, ok = c.Get(key) // Ensure we can read back the different value. assert.True(ok) @@ -292,32 +290,32 @@ func TestCRCDiskCache(t *testing.T) { // Ensure that deleting a key does in fact delete it. t.Run("DeleteKey", func(t *testing.T) { - cacheDir, err := ioutil.TempDir("", "cache-crc") + cacheDir, err := ioutil.TempDir("", "cache-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(cacheDir) d := diskv.New(diskv.Options{BasePath: cacheDir, TempDir: filepath.Join(cacheDir, ".diskv-temp")}) - crc := &crcDiskCache{disk: d} + c := &sumDiskCache{disk: d} key := "testing" value := []byte("coolValue") - crc.Set(key, value) + c.Set(key, value) // Ensure we successfully set the value. 
- got, ok := crc.Get(key) + got, ok := c.Get(key) assert.True(ok) assert.Equal(value, got) - crc.Delete(key) + c.Delete(key) // Ensure the value is gone. - got, ok = crc.Get(key) + got, ok = c.Get(key) assert.False(ok) assert.Equal([]byte{}, got) // Ensure that deleting a non-existent value is a no-op. - crc.Delete(key) + c.Delete(key) }) }
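After the final two commits the on-disk layout is SHA-256 based end to end: the filename is the SHA-256 hex digest of the httpcache key, and the file holds the SHA-256 sum of the response followed by the response bytes. A standalone sketch of that final layout (illustrative only; filename and verify are hypothetical helpers mirroring sanitize and sumDiskCache.Get, not code from the patches):

    package main

    import (
        "bytes"
        "crypto/sha256"
        "fmt"
    )

    // filename mirrors sanitize(): the SHA-256 hex digest of the httpcache key,
    // which is always safe to use as a diskv filename.
    func filename(key string) string {
        return fmt.Sprintf("%x", sha256.Sum256([]byte(key)))
    }

    // verify mirrors sumDiskCache.Get: the first sha256.Size bytes must be the
    // sum of the remainder; anything else is treated as a cache miss.
    func verify(b []byte) ([]byte, bool) {
        if len(b) < sha256.Size {
            return nil, false
        }
        want, response := b[:sha256.Size], b[sha256.Size:]
        got := sha256.Sum256(response)
        if !bytes.Equal(want, got[:]) {
            return nil, false
        }
        return response, true
    }

    func main() {
        key := "localhost:8080/apis/batch/v1.json" // Same key the benchmark uses.
        response := []byte(`{"kind":"APIResourceList"}`)
        sum := sha256.Sum256(response)
        entry := append(sum[:], response...)

        fmt.Println(filename(key)) // 64 hex characters.
        _, ok := verify(entry)
        fmt.Println(ok) // true
        entry[len(entry)-1]++ // Simulate corruption from a partial write.
        _, ok = verify(entry)
        fmt.Println(ok) // false: corrupted entries read as misses and are re-fetched.
    }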