diff --git a/go.mod b/go.mod
index 5da10f61..b832ceb6 100644
--- a/go.mod
+++ b/go.mod
@@ -50,6 +50,7 @@ require (
github.com/opencontainers/image-spec v1.0.1
github.com/otiai10/copy v1.2.1-0.20200916181228-26f84a0b1578
github.com/pelletier/go-toml v1.9.4 // indirect
+ github.com/peterbourgon/diskv v2.0.1+incompatible
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
github.com/pkg/errors v0.9.1
github.com/pterm/pterm v0.12.32-0.20211002183613-ada9ef6790c3
diff --git a/go.sum b/go.sum
index 9f457080..2329916d 100644
--- a/go.sum
+++ b/go.sum
@@ -537,6 +537,7 @@ github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAO
github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
@@ -809,8 +810,6 @@ github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7P
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d h1:fKh+rvwZQCA+TPzK0EMwwbqhjvRHaQ6H8AsVU1Wt+NQ=
github.com/mudler/cobra-extensions v0.0.0-20200612154940-31a47105fe3d/go.mod h1:puRUWSwyecW2V355tKncwPVPRAjQBduPsFjG0mrV/Nw=
-github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af h1:jixIxEgLSqu24eMiyzfCI+roa5IaOUhF546ePSFyHeY=
-github.com/mudler/go-pluggable v0.0.0-20210513155700-54c6443073af/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d h1:NKvvf/q1dWDde+yg5cMiU5EuYZ2jNuKs/9hb8xod8A0=
github.com/mudler/go-pluggable v0.0.0-20211022125509-94dbf124830d/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
github.com/mudler/topsort v0.0.0-20201103161459-db5c7901c290 h1:426hFyXMpXeqIeGJn2cGAW9ogvM2Jf+Jv23gtVPvBLM=
@@ -909,6 +908,7 @@ github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCko
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f h1:WyCn68lTiytVSkk7W1K9nBiSGTSRlUOdyTnSjwrIlok=
diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml
new file mode 100644
index 00000000..4f2ee4d9
--- /dev/null
+++ b/vendor/github.com/google/btree/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE
new file mode 100644
index 00000000..d6456956
--- /dev/null
+++ b/vendor/github.com/google/btree/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md
new file mode 100644
index 00000000..6062a4da
--- /dev/null
+++ b/vendor/github.com/google/btree/README.md
@@ -0,0 +1,12 @@
+# BTree implementation for Go
+
+This package provides an in-memory B-Tree implementation for Go, useful as
+an ordered, mutable data structure.
+
+The API is based on the wonderful
+http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
+act as a drop-in replacement for gollrb trees.
+
+See http://godoc.org/github.com/google/btree for documentation.
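+
+A quick usage sketch (not from upstream; it relies only on the API defined in
+btree.go below, including the `Int` helper type):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/btree"
+)
+
+func main() {
+	tr := btree.New(2) // degree 2: a 2-3-4 tree, 1-3 items per node
+	for i := 0; i < 10; i++ {
+		tr.ReplaceOrInsert(btree.Int(i))
+	}
+	fmt.Println(tr.Len())             // 10
+	fmt.Println(tr.Get(btree.Int(3))) // 3
+	fmt.Println(tr.Max())             // 9
+}
+```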
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go
new file mode 100644
index 00000000..6ff062f9
--- /dev/null
+++ b/vendor/github.com/google/btree/btree.go
@@ -0,0 +1,890 @@
+// Copyright 2014 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package btree implements in-memory B-Trees of arbitrary degree.
+//
+// btree implements an in-memory B-Tree for use as an ordered data structure.
+// It is not meant for persistent storage solutions.
+//
+// It has a flatter structure than an equivalent red-black or other binary tree,
+// which in some cases yields better memory usage and/or performance.
+// See some discussion on the matter here:
+// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
+// Note, though, that this project is in no way related to the C++ B-Tree
+// implementation written about there.
+//
+// Within this tree, each node contains a slice of items and a (possibly nil)
+// slice of children. For basic numeric values or raw structs, this can cause
+// efficiency differences when compared to equivalent C++ template code that
+// stores values in arrays within the node:
+// * Due to the overhead of storing values as interfaces (each
+// value needs to be stored as the value itself, then 2 words for the
+// interface pointing to that value and its type), resulting in higher
+// memory use.
+// * Since interfaces can point to values anywhere in memory, values are
+// most likely not stored in contiguous blocks, resulting in a higher
+// number of cache misses.
+// These issues don't tend to matter, though, when working with strings or other
+// heap-allocated structures, since C++-equivalent structures also must store
+// pointers and also distribute their values across the heap.
+//
+// This implementation is designed to be a drop-in replacement to gollrb.LLRB
+// trees, (http://github.com/petar/gollrb), an excellent and probably the most
+// widely used ordered tree implementation in the Go ecosystem currently.
+// Its functions, therefore, exactly mirror those of
+// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
+// support storing multiple equivalent values.
+package btree
+
+import (
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// Item represents a single object in the tree.
+type Item interface {
+ // Less tests whether the current item is less than the given argument.
+ //
+ // This must provide a strict weak ordering.
+ // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
+ // hold one of either a or b in the tree).
+ Less(than Item) bool
+}
+
+const (
+ DefaultFreeListSize = 32
+)
+
+var (
+ nilItems = make(items, 16)
+ nilChildren = make(children, 16)
+)
+
+// FreeList represents a free list of btree nodes. By default each
+// BTree has its own FreeList, but multiple BTrees can share the same
+// FreeList.
+// Two BTrees using the same freelist are safe for concurrent write access.
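+//
+// For example (an illustrative sketch, not upstream code):
+//
+//	f := NewFreeList(32)
+//	t1 := NewWithFreeList(8, f)
+//	t2 := NewWithFreeList(8, f) // t1 and t2 recycle nodes from the same pool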
+type FreeList struct {
+ mu sync.Mutex
+ freelist []*node
+}
+
+// NewFreeList creates a new free list.
+// size is the maximum size of the returned free list.
+func NewFreeList(size int) *FreeList {
+ return &FreeList{freelist: make([]*node, 0, size)}
+}
+
+func (f *FreeList) newNode() (n *node) {
+ f.mu.Lock()
+ index := len(f.freelist) - 1
+ if index < 0 {
+ f.mu.Unlock()
+ return new(node)
+ }
+ n = f.freelist[index]
+ f.freelist[index] = nil
+ f.freelist = f.freelist[:index]
+ f.mu.Unlock()
+ return
+}
+
+// freeNode adds the given node to the list, returning true if it was added
+// and false if it was discarded.
+func (f *FreeList) freeNode(n *node) (out bool) {
+ f.mu.Lock()
+ if len(f.freelist) < cap(f.freelist) {
+ f.freelist = append(f.freelist, n)
+ out = true
+ }
+ f.mu.Unlock()
+ return
+}
+
+// ItemIterator allows callers of Ascend* to iterate in-order over portions of
+// the tree. When this function returns false, iteration will stop and the
+// associated Ascend* function will immediately return.
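+//
+// For example (an illustrative sketch), printing every item in order:
+//
+//	tr.Ascend(func(i Item) bool {
+//		fmt.Println(i)
+//		return true // returning false would stop iteration early
+//	})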
+type ItemIterator func(i Item) bool
+
+// New creates a new B-Tree with the given degree.
+//
+// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
+// and 2-4 children).
+func New(degree int) *BTree {
+ return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
+}
+
+// NewWithFreeList creates a new B-Tree that uses the given node free list.
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+ if degree <= 1 {
+ panic("bad degree")
+ }
+ return &BTree{
+ degree: degree,
+ cow: &copyOnWriteContext{freelist: f},
+ }
+}
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+ *s = append(*s, nil)
+ if index < len(*s) {
+ copy((*s)[index+1:], (*s)[index:])
+ }
+ (*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+ item := (*s)[index]
+ copy((*s)[index:], (*s)[index+1:])
+ (*s)[len(*s)-1] = nil
+ *s = (*s)[:len(*s)-1]
+ return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+ index := len(*s) - 1
+ out = (*s)[index]
+ (*s)[index] = nil
+ *s = (*s)[:index]
+ return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+ var toClear items
+ *s, toClear = (*s)[:index], (*s)[index:]
+ for len(toClear) > 0 {
+ toClear = toClear[copy(toClear, nilItems):]
+ }
+}
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item) (index int, found bool) {
+ i := sort.Search(len(s), func(i int) bool {
+ return item.Less(s[i])
+ })
+ if i > 0 && !s[i-1].Less(item) {
+ return i - 1, true
+ }
+ return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+ *s = append(*s, nil)
+ if index < len(*s) {
+ copy((*s)[index+1:], (*s)[index:])
+ }
+ (*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+ n := (*s)[index]
+ copy((*s)[index:], (*s)[index+1:])
+ (*s)[len(*s)-1] = nil
+ *s = (*s)[:len(*s)-1]
+ return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+ index := len(*s) - 1
+ out = (*s)[index]
+ (*s)[index] = nil
+ *s = (*s)[:index]
+ return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+ var toClear children
+ *s, toClear = (*s)[:index], (*s)[index:]
+ for len(toClear) > 0 {
+ toClear = toClear[copy(toClear, nilChildren):]
+ }
+}
+
+// node is an internal node in a tree.
+//
+// It must at all times maintain the invariant that either
+// * len(children) == 0, len(items) unconstrained
+// * len(children) == len(items) + 1
+type node struct {
+ items items
+ children children
+ cow *copyOnWriteContext
+}
+
+func (n *node) mutableFor(cow *copyOnWriteContext) *node {
+ if n.cow == cow {
+ return n
+ }
+ out := cow.newNode()
+ if cap(out.items) >= len(n.items) {
+ out.items = out.items[:len(n.items)]
+ } else {
+ out.items = make(items, len(n.items), cap(n.items))
+ }
+ copy(out.items, n.items)
+ // Copy children
+ if cap(out.children) >= len(n.children) {
+ out.children = out.children[:len(n.children)]
+ } else {
+ out.children = make(children, len(n.children), cap(n.children))
+ }
+ copy(out.children, n.children)
+ return out
+}
+
+func (n *node) mutableChild(i int) *node {
+ c := n.children[i].mutableFor(n.cow)
+ n.children[i] = c
+ return c
+}
+
+// split splits the given node at the given index. The current node shrinks,
+// and this function returns the item that existed at that index and a new node
+// containing all items/children after it.
+func (n *node) split(i int) (Item, *node) {
+ item := n.items[i]
+ next := n.cow.newNode()
+ next.items = append(next.items, n.items[i+1:]...)
+ n.items.truncate(i)
+ if len(n.children) > 0 {
+ next.children = append(next.children, n.children[i+1:]...)
+ n.children.truncate(i + 1)
+ }
+ return item, next
+}
+
+// maybeSplitChild checks if a child should be split, and if so splits it.
+// Returns whether or not a split occurred.
+func (n *node) maybeSplitChild(i, maxItems int) bool {
+ if len(n.children[i].items) < maxItems {
+ return false
+ }
+ first := n.mutableChild(i)
+ item, second := first.split(maxItems / 2)
+ n.items.insertAt(i, item)
+ n.children.insertAt(i+1, second)
+ return true
+}
+
+// insert inserts an item into the subtree rooted at this node, making sure
+// no nodes in the subtree exceed maxItems items. Should an equivalent item
+// be found/replaced by insert, it will be returned.
+func (n *node) insert(item Item, maxItems int) Item {
+ i, found := n.items.find(item)
+ if found {
+ out := n.items[i]
+ n.items[i] = item
+ return out
+ }
+ if len(n.children) == 0 {
+ n.items.insertAt(i, item)
+ return nil
+ }
+ if n.maybeSplitChild(i, maxItems) {
+ inTree := n.items[i]
+ switch {
+ case item.Less(inTree):
+ // no change, we want first split node
+ case inTree.Less(item):
+ i++ // we want second split node
+ default:
+ out := n.items[i]
+ n.items[i] = item
+ return out
+ }
+ }
+ return n.mutableChild(i).insert(item, maxItems)
+}
+
+// get finds the given key in the subtree and returns it.
+func (n *node) get(key Item) Item {
+ i, found := n.items.find(key)
+ if found {
+ return n.items[i]
+ } else if len(n.children) > 0 {
+ return n.children[i].get(key)
+ }
+ return nil
+}
+
+// min returns the first item in the subtree.
+func min(n *node) Item {
+ if n == nil {
+ return nil
+ }
+ for len(n.children) > 0 {
+ n = n.children[0]
+ }
+ if len(n.items) == 0 {
+ return nil
+ }
+ return n.items[0]
+}
+
+// max returns the last item in the subtree.
+func max(n *node) Item {
+ if n == nil {
+ return nil
+ }
+ for len(n.children) > 0 {
+ n = n.children[len(n.children)-1]
+ }
+ if len(n.items) == 0 {
+ return nil
+ }
+ return n.items[len(n.items)-1]
+}
+
+// toRemove details what item to remove in a node.remove call.
+type toRemove int
+
+const (
+ removeItem toRemove = iota // removes the given item
+ removeMin // removes smallest item in the subtree
+ removeMax // removes largest item in the subtree
+)
+
+// remove removes an item from the subtree rooted at this node.
+func (n *node) remove(item Item, minItems int, typ toRemove) Item {
+ var i int
+ var found bool
+ switch typ {
+ case removeMax:
+ if len(n.children) == 0 {
+ return n.items.pop()
+ }
+ i = len(n.items)
+ case removeMin:
+ if len(n.children) == 0 {
+ return n.items.removeAt(0)
+ }
+ i = 0
+ case removeItem:
+ i, found = n.items.find(item)
+ if len(n.children) == 0 {
+ if found {
+ return n.items.removeAt(i)
+ }
+ return nil
+ }
+ default:
+ panic("invalid type")
+ }
+ // If we get to here, we have children.
+ if len(n.children[i].items) <= minItems {
+ return n.growChildAndRemove(i, item, minItems, typ)
+ }
+ child := n.mutableChild(i)
+ // Either we had enough items to begin with, or we've done some
+ // merging/stealing, because we've got enough now and we're ready to return
+ // stuff.
+ if found {
+ // The item exists at index 'i', and the child we've selected can give us a
+ // predecessor, since if we've gotten here it's got > minItems items in it.
+ out := n.items[i]
+ // We use our special-case 'remove' call with typ=maxItem to pull the
+ // predecessor of item i (the rightmost leaf of our immediate left child)
+ // and set it into where we pulled the item from.
+ n.items[i] = child.remove(nil, minItems, removeMax)
+ return out
+ }
+ // Final recursive call. Once we're here, we know that the item isn't in this
+ // node and that the child is big enough to remove from.
+ return child.remove(item, minItems, typ)
+}
+
+// growChildAndRemove grows child 'i' to make sure it's possible to remove an
+// item from it while keeping it at minItems, then calls remove to actually
+// remove it.
+//
+// Most documentation says we have to do two sets of special casing:
+// 1) item is in this node
+// 2) item is in child
+// In both cases, we need to handle the two subcases:
+// A) node has enough values that it can spare one
+// B) node doesn't have enough values
+// For the latter, we have to check:
+// a) left sibling has node to spare
+// b) right sibling has node to spare
+// c) we must merge
+// To simplify our code here, we handle cases #1 and #2 the same:
+// If a node doesn't have enough items, we make sure it does (using a,b,c).
+// We then simply redo our remove call, and the second time (regardless of
+// whether we're in case 1 or 2), we'll have enough items and can guarantee
+// that we hit case A.
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
+ if i > 0 && len(n.children[i-1].items) > minItems {
+ // Steal from left child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i - 1)
+ stolenItem := stealFrom.items.pop()
+ child.items.insertAt(0, n.items[i-1])
+ n.items[i-1] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children.insertAt(0, stealFrom.children.pop())
+ }
+ } else if i < len(n.items) && len(n.children[i+1].items) > minItems {
+ // steal from right child
+ child := n.mutableChild(i)
+ stealFrom := n.mutableChild(i + 1)
+ stolenItem := stealFrom.items.removeAt(0)
+ child.items = append(child.items, n.items[i])
+ n.items[i] = stolenItem
+ if len(stealFrom.children) > 0 {
+ child.children = append(child.children, stealFrom.children.removeAt(0))
+ }
+ } else {
+ if i >= len(n.items) {
+ i--
+ }
+ child := n.mutableChild(i)
+ // merge with right child
+ mergeItem := n.items.removeAt(i)
+ mergeChild := n.children.removeAt(i + 1)
+ child.items = append(child.items, mergeItem)
+ child.items = append(child.items, mergeChild.items...)
+ child.children = append(child.children, mergeChild.children...)
+ n.cow.freeNode(mergeChild)
+ }
+ return n.remove(item, minItems, typ)
+}
+
+type direction int
+
+const (
+ descend = direction(-1)
+ ascend = direction(+1)
+)
+
+// iterate provides a simple method for iterating over elements in the tree.
+//
+// When ascending, the 'start' should be less than 'stop' and when descending,
+// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
+// will force the iterator to include the first item when it equals 'start',
+// thus creating a "greaterOrEqual" or "lessThanEqual" query rather than
+// just a "greaterThan" or "lessThan" query.
+func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
+ var ok, found bool
+ var index int
+ switch dir {
+ case ascend:
+ if start != nil {
+ index, _ = n.items.find(start)
+ }
+ for i := index; i < len(n.items); i++ {
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
+ hit = true
+ continue
+ }
+ hit = true
+ if stop != nil && !n.items[i].Less(stop) {
+ return hit, false
+ }
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ case descend:
+ if start != nil {
+ index, found = n.items.find(start)
+ if !found {
+ index = index - 1
+ }
+ } else {
+ index = len(n.items) - 1
+ }
+ for i := index; i >= 0; i-- {
+ if start != nil && !n.items[i].Less(start) {
+ if !includeStart || hit || start.Less(n.items[i]) {
+ continue
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ if stop != nil && !stop.Less(n.items[i]) {
+ return hit, false // continue
+ }
+ hit = true
+ if !iter(n.items[i]) {
+ return hit, false
+ }
+ }
+ if len(n.children) > 0 {
+ if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
+ return hit, false
+ }
+ }
+ }
+ return hit, true
+}
+
+// Used for testing/debugging purposes.
+func (n *node) print(w io.Writer, level int) {
+ fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
+ for _, c := range n.children {
+ c.print(w, level+1)
+ }
+}
+
+// BTree is an implementation of a B-Tree.
+//
+// BTree stores Item instances in an ordered structure, allowing easy insertion,
+// removal, and iteration.
+//
+// Write operations are not safe for concurrent mutation by multiple
+// goroutines, but Read operations are.
+type BTree struct {
+ degree int
+ length int
+ root *node
+ cow *copyOnWriteContext
+}
+
+// copyOnWriteContext pointers determine node ownership... a tree with a write
+// context equivalent to a node's write context is allowed to modify that node.
+// A tree whose write context does not match a node's is not allowed to modify
+// it, and must create a new, writable copy (i.e. it's a Clone).
+//
+// When doing any write operation, we maintain the invariant that the current
+// node's context is equal to the context of the tree that requested the write.
+// We do this by, before we descend into any node, creating a copy with the
+// correct context if the contexts don't match.
+//
+// Since the node we're currently visiting on any write has the requesting
+// tree's context, that node is modifiable in place. Children of that node may
+// not share context, but before we descend into them, we'll make a mutable
+// copy.
+type copyOnWriteContext struct {
+ freelist *FreeList
+}
+
+// Clone clones the btree, lazily. Clone should not be called concurrently,
+// but the original tree (t) and the new tree (t2) can be used concurrently
+// once the Clone call completes.
+//
+// The internal tree structure of t is marked read-only and shared between t and
+// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
+// whenever one of t's original nodes would have been modified. Read operations
+// should have no performance degradation. Write operations for both t and t2
+// will initially experience minor slow-downs caused by additional allocs and
+// copies due to the aforementioned copy-on-write logic, but should converge to
+// the performance characteristics of the original tree.
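+//
+// For example (an illustrative sketch):
+//
+//	t2 := t.Clone()
+//	t2.ReplaceOrInsert(item) // t is unchanged; shared nodes are copied on write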
+func (t *BTree) Clone() (t2 *BTree) {
+ // Create two entirely new copy-on-write contexts.
+ // This operation effectively creates three trees:
+ // the original, shared nodes (old t.cow)
+ // the new t.cow nodes
+ // the new out.cow nodes
+ cow1, cow2 := *t.cow, *t.cow
+ out := *t
+ t.cow = &cow1
+ out.cow = &cow2
+ return &out
+}
+
+// maxItems returns the max number of items to allow per node.
+func (t *BTree) maxItems() int {
+ return t.degree*2 - 1
+}
+
+// minItems returns the min number of items to allow per node (ignored for the
+// root node).
+func (t *BTree) minItems() int {
+ return t.degree - 1
+}
+
+func (c *copyOnWriteContext) newNode() (n *node) {
+ n = c.freelist.newNode()
+ n.cow = c
+ return
+}
+
+type freeType int
+
+const (
+ ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
+ ftStored // node was stored in the freelist for later use
+ ftNotOwned // node was ignored by COW, since it's owned by another one
+)
+
+// freeNode frees a node within a given COW context, if it's owned by that
+// context. It returns what happened to the node (see freeType const
+// documentation).
+func (c *copyOnWriteContext) freeNode(n *node) freeType {
+ if n.cow == c {
+ // clear to allow GC
+ n.items.truncate(0)
+ n.children.truncate(0)
+ n.cow = nil
+ if c.freelist.freeNode(n) {
+ return ftStored
+ } else {
+ return ftFreelistFull
+ }
+ } else {
+ return ftNotOwned
+ }
+}
+
+// ReplaceOrInsert adds the given item to the tree. If an item in the tree
+// already equals the given one, it is removed from the tree and returned.
+// Otherwise, nil is returned.
+//
+// nil cannot be added to the tree (will panic).
+func (t *BTree) ReplaceOrInsert(item Item) Item {
+ if item == nil {
+ panic("nil item being added to BTree")
+ }
+ if t.root == nil {
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item)
+ t.length++
+ return nil
+ } else {
+ t.root = t.root.mutableFor(t.cow)
+ if len(t.root.items) >= t.maxItems() {
+ item2, second := t.root.split(t.maxItems() / 2)
+ oldroot := t.root
+ t.root = t.cow.newNode()
+ t.root.items = append(t.root.items, item2)
+ t.root.children = append(t.root.children, oldroot, second)
+ }
+ }
+ out := t.root.insert(item, t.maxItems())
+ if out == nil {
+ t.length++
+ }
+ return out
+}
+
+// Delete removes an item equal to the passed in item from the tree, returning
+// it. If no such item exists, returns nil.
+func (t *BTree) Delete(item Item) Item {
+ return t.deleteItem(item, removeItem)
+}
+
+// DeleteMin removes the smallest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMin() Item {
+ return t.deleteItem(nil, removeMin)
+}
+
+// DeleteMax removes the largest item in the tree and returns it.
+// If no such item exists, returns nil.
+func (t *BTree) DeleteMax() Item {
+ return t.deleteItem(nil, removeMax)
+}
+
+func (t *BTree) deleteItem(item Item, typ toRemove) Item {
+ if t.root == nil || len(t.root.items) == 0 {
+ return nil
+ }
+ t.root = t.root.mutableFor(t.cow)
+ out := t.root.remove(item, t.minItems(), typ)
+ if len(t.root.items) == 0 && len(t.root.children) > 0 {
+ oldroot := t.root
+ t.root = t.root.children[0]
+ t.cow.freeNode(oldroot)
+ }
+ if out != nil {
+ t.length--
+ }
+ return out
+}
+
+// AscendRange calls the iterator for every value in the tree within the range
+// [greaterOrEqual, lessThan), until iterator returns false.
+func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
+}
+
+// AscendLessThan calls the iterator for every value in the tree within the range
+// [first, pivot), until iterator returns false.
+func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, nil, pivot, false, false, iterator)
+}
+
+// AscendGreaterOrEqual calls the iterator for every value in the tree within
+// the range [pivot, last], until iterator returns false.
+func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, pivot, nil, true, false, iterator)
+}
+
+// Ascend calls the iterator for every value in the tree within the range
+// [first, last], until iterator returns false.
+func (t *BTree) Ascend(iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(ascend, nil, nil, false, false, iterator)
+}
+
+// DescendRange calls the iterator for every value in the tree within the range
+// [lessOrEqual, greaterThan), until iterator returns false.
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
+}
+
+// DescendLessOrEqual calls the iterator for every value in the tree within the range
+// [pivot, first], until iterator returns false.
+func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, pivot, nil, true, false, iterator)
+}
+
+// DescendGreaterThan calls the iterator for every value in the tree within
+// the range (pivot, last], until iterator returns false.
+func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, nil, pivot, false, false, iterator)
+}
+
+// Descend calls the iterator for every value in the tree within the range
+// [last, first], until iterator returns false.
+func (t *BTree) Descend(iterator ItemIterator) {
+ if t.root == nil {
+ return
+ }
+ t.root.iterate(descend, nil, nil, false, false, iterator)
+}
+
+// Get looks for the key item in the tree, returning it. It returns nil if
+// unable to find that item.
+func (t *BTree) Get(key Item) Item {
+ if t.root == nil {
+ return nil
+ }
+ return t.root.get(key)
+}
+
+// Min returns the smallest item in the tree, or nil if the tree is empty.
+func (t *BTree) Min() Item {
+ return min(t.root)
+}
+
+// Max returns the largest item in the tree, or nil if the tree is empty.
+func (t *BTree) Max() Item {
+ return max(t.root)
+}
+
+// Has returns true if the given key is in the tree.
+func (t *BTree) Has(key Item) bool {
+ return t.Get(key) != nil
+}
+
+// Len returns the number of items currently in the tree.
+func (t *BTree) Len() int {
+ return t.length
+}
+
+// Clear removes all items from the btree. If addNodesToFreelist is true,
+// t's nodes are added to its freelist as part of this call, until the freelist
+// is full. Otherwise, the root node is simply dereferenced and the subtree
+// left to Go's normal GC processes.
+//
+// This can be much faster
+// than calling Delete on all elements, because that requires finding/removing
+// each element in the tree and updating the tree accordingly. It also is
+// somewhat faster than creating a new tree to replace the old one, because
+// nodes from the old tree are reclaimed into the freelist for use by the new
+// one, instead of being lost to the garbage collector.
+//
+// This call takes:
+// O(1): when addNodesToFreelist is false, this is a single operation.
+// O(1): when the freelist is already full, it breaks out immediately.
+// O(freelist size): when the freelist is empty and the nodes are all owned
+// by this tree, nodes are added to the freelist until full.
+// O(tree size): when all nodes are owned by another tree, all nodes are
+// iterated over looking for nodes to add to the freelist, and due to
+// ownership, none are.
+func (t *BTree) Clear(addNodesToFreelist bool) {
+ if t.root != nil && addNodesToFreelist {
+ t.root.reset(t.cow)
+ }
+ t.root, t.length = nil, 0
+}
+
+// reset returns a subtree to the freelist. It breaks out immediately if the
+// freelist is full, since the only benefit of iterating is to fill that
+// freelist up. Returns true if parent reset call should continue.
+func (n *node) reset(c *copyOnWriteContext) bool {
+ for _, child := range n.children {
+ if !child.reset(c) {
+ return false
+ }
+ }
+ return c.freeNode(n) != ftFreelistFull
+}
+
+// Int implements the Item interface for integers.
+type Int int
+
+// Less returns true if int(a) < int(b).
+func (a Int) Less(b Item) bool {
+ return a < b.(Int)
+}
diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE
new file mode 100644
index 00000000..41ce7f16
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2011-2012 Peter Bourgon
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md
new file mode 100644
index 00000000..3474739e
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/README.md
@@ -0,0 +1,141 @@
+# What is diskv?
+
+Diskv (disk-vee) is a simple, persistent key-value store written in the Go
+language. It starts with an incredibly simple API for storing arbitrary data on
+a filesystem by key, and builds several layers of performance-enhancing
+abstraction on top. The end result is a conceptually simple, but highly
+performant, disk-backed storage system.
+
+[![Build Status][1]][2]
+
+[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
+[2]: https://drone.io/github.com/peterbourgon/diskv/latest
+
+
+# Installing
+
+Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
+Then,
+
+```bash
+$ go get github.com/peterbourgon/diskv
+```
+
+[3]: http://golang.org
+[4]: http://golang.org/doc/install/source
+[5]: http://golang.org/doc/install
+
+
+# Usage
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/peterbourgon/diskv"
+)
+
+func main() {
+ // Simplest transform function: put all the data files into the base dir.
+ flatTransform := func(s string) []string { return []string{} }
+
+ // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
+ d := diskv.New(diskv.Options{
+ BasePath: "my-data-dir",
+ Transform: flatTransform,
+ CacheSizeMax: 1024 * 1024,
+ })
+
+ // Write three bytes to the key "alpha".
+ key := "alpha"
+ d.Write(key, []byte{'1', '2', '3'})
+
+ // Read the value back out of the store.
+ value, _ := d.Read(key)
+ fmt.Printf("%v\n", value)
+
+ // Erase the key+value from the store (and the disk).
+ d.Erase(key)
+}
+```
+
+More complex examples can be found in the "examples" subdirectory.
+
+
+# Theory
+
+## Basic idea
+
+At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
+The data is written to a single file on disk, with the same name as the key.
+The key determines where that file will be stored, via a user-provided
+`TransformFunc`, which takes a key and returns a slice (`[]string`)
+corresponding to a path list where the key file will be stored. The simplest
+TransformFunc,
+
+```go
+func SimpleTransform(key string) []string {
+ return []string{}
+}
+```
+
+will place all keys in the same, base directory. The design is inspired by
+[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
+behavior is available in the content-addressable-storage example.
+
+[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
+
+**Note** that your TransformFunc should ensure that one valid key doesn't
+transform to a subset of another valid key. That is, it shouldn't be possible
+to construct valid keys that resolve to directory names. As a concrete example,
+if your TransformFunc splits on every 3 characters, then
+
+```go
+d.Write("abcabc", val) // OK: written to /abc/abc/abcabc
+d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory
+```
+
+This will be addressed in an upcoming version of diskv.
+
+Probably the most important design principle behind diskv is that your data is
+always flatly available on the disk. diskv will never do anything that would
+prevent you from accessing, copying, backing up, or otherwise interacting with
+your data via common UNIX commandline tools.
+
+## Adding a cache
+
+An in-memory caching layer is provided by combining the BasicStore
+functionality with a simple map structure, and keeping it up-to-date as
+appropriate. Since the map structure in Go is not threadsafe, it's combined
+with a RWMutex to provide safe concurrent access.
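+
+A minimal sketch of that pattern (illustrative only; in diskv.go below the
+concrete fields are `mu sync.RWMutex` and `cache map[string][]byte` on the
+`Diskv` struct):
+
+```go
+import "sync"
+
+type memCache struct {
+	mu sync.RWMutex
+	m  map[string][]byte
+}
+
+func (c *memCache) get(key string) ([]byte, bool) {
+	c.mu.RLock() // multiple readers may hold the lock concurrently
+	defer c.mu.RUnlock()
+	v, ok := c.m[key]
+	return v, ok
+}
+```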
+
+## Adding order
+
+diskv is a key-value store and therefore inherently unordered. An ordering
+system can be injected into the store by passing something which satisfies the
+diskv.Index interface. (A default implementation, using Google's
+[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
+user-provided Less function) index of the keys, which can be queried.
+
+[7]: https://github.com/google/btree
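+
+For example (a sketch assuming the `BTreeIndex` type from diskv's index.go,
+which is not part of this diff):
+
+```go
+d := diskv.New(diskv.Options{
+	BasePath:  "my-data-dir",
+	Index:     &diskv.BTreeIndex{},
+	IndexLess: func(a, b string) bool { return a < b },
+})
+```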
+
+## Adding compression
+
+Something which implements the diskv.Compression interface may be passed
+during store creation, so that all Writes and Reads are filtered through
+a compression/decompression pipeline. Several default implementations,
+using stdlib compression algorithms, are provided. Note that data is cached
+compressed; the cost of decompression is borne with each Read.
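+
+For example (a sketch using the constructor from compression.go in this diff):
+
+```go
+d := diskv.New(diskv.Options{
+	BasePath:    "my-data-dir",
+	Compression: diskv.NewGzipCompression(),
+})
+```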
+
+## Streaming
+
+diskv also provides ReadStream and WriteStream methods, to allow very large
+data to be handled efficiently.
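+
+For example (a sketch; error handling elided, and both signatures appear in
+diskv.go below):
+
+```go
+src, _ := os.Open("big-input.dat") // hypothetical input file
+defer src.Close()
+d.WriteStream("big-key", src, true) // sync=true fsyncs before returning
+
+rc, _ := d.ReadStream("big-key", false)
+defer rc.Close()
+io.Copy(os.Stdout, rc)
+```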
+
+
+# Future plans
+
+ * Needs plenty of robust testing: huge datasets, etc...
+ * More thorough benchmarking
+ * Your suggestions for use-cases I haven't thought of
diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go
new file mode 100644
index 00000000..5192b027
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/compression.go
@@ -0,0 +1,64 @@
+package diskv
+
+import (
+ "compress/flate"
+ "compress/gzip"
+ "compress/zlib"
+ "io"
+)
+
+// Compression is an interface that Diskv uses to implement compression of
+// data. Writer takes a destination io.Writer and returns a WriteCloser that
+// compresses all data written through it. Reader takes a source io.Reader and
+// returns a ReadCloser that decompresses all data read through it. You may
+// define these methods on your own type, or use one of the NewCompression
+// helpers.
+type Compression interface {
+ Writer(dst io.Writer) (io.WriteCloser, error)
+ Reader(src io.Reader) (io.ReadCloser, error)
+}
+
+// NewGzipCompression returns a Gzip-based Compression.
+func NewGzipCompression() Compression {
+ return NewGzipCompressionLevel(flate.DefaultCompression)
+}
+
+// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
+func NewGzipCompressionLevel(level int) Compression {
+ return &genericCompression{
+ wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
+ rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
+ }
+}
+
+// NewZlibCompression returns a Zlib-based Compression.
+func NewZlibCompression() Compression {
+ return NewZlibCompressionLevel(flate.DefaultCompression)
+}
+
+// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
+func NewZlibCompressionLevel(level int) Compression {
+ return NewZlibCompressionLevelDict(level, nil)
+}
+
+// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
+// level, based on the given dictionary.
+func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
+ return &genericCompression{
+ func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
+ func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
+ }
+}
+
+type genericCompression struct {
+ wf func(w io.Writer) (io.WriteCloser, error)
+ rf func(r io.Reader) (io.ReadCloser, error)
+}
+
+func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
+ return g.wf(dst)
+}
+
+func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
+ return g.rf(src)
+}
diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go
new file mode 100644
index 00000000..524dc0a6
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/diskv.go
@@ -0,0 +1,624 @@
+// Diskv (disk-vee) is a simple, persistent, key-value store.
+// It stores all data flatly on the filesystem.
+
+package diskv
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "syscall"
+)
+
+const (
+ defaultBasePath = "diskv"
+ defaultFilePerm os.FileMode = 0666
+ defaultPathPerm os.FileMode = 0777
+)
+
+var (
+ defaultTransform = func(s string) []string { return []string{} }
+ errCanceled = errors.New("canceled")
+ errEmptyKey = errors.New("empty key")
+ errBadKey = errors.New("bad key")
+ errImportDirectory = errors.New("can't import a directory")
+)
+
+// TransformFunction transforms a key into a slice of strings, with each
+// element in the slice representing a directory in the file path where the
+// key's entry will eventually be stored.
+//
+// For example, if the TransformFunction transforms "abcdef" to ["ab", "cde", "f"],
+// the final location of the data file will be /ab/cde/f/abcdef
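+//
+// For example (an illustrative sketch), sharding keys by their
+// two-character prefix:
+//
+//	func(s string) []string {
+//		if len(s) < 2 {
+//			return []string{}
+//		}
+//		return []string{s[:2]}
+//	}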
+type TransformFunction func(s string) []string
+
+// Options define a set of properties that dictate Diskv behavior.
+// All values are optional.
+type Options struct {
+ BasePath string
+ Transform TransformFunction
+ CacheSizeMax uint64 // bytes
+ PathPerm os.FileMode
+ FilePerm os.FileMode
+ // If TempDir is set, it will enable filesystem atomic writes by
+ // writing temporary files to that location before being moved
+ // to BasePath.
+ // Note that TempDir MUST be on the same device/partition as
+ // BasePath.
+ TempDir string
+
+ Index Index
+ IndexLess LessFunction
+
+ Compression Compression
+}
+
+// Diskv implements the Diskv interface. You shouldn't construct Diskv
+// structures directly; instead, use the New constructor.
+type Diskv struct {
+ Options
+ mu sync.RWMutex
+ cache map[string][]byte
+ cacheSize uint64
+}
+
+// New returns an initialized Diskv structure, ready to use.
+// If the path identified by baseDir already contains data,
+// it will be accessible, but not yet cached.
+func New(o Options) *Diskv {
+ if o.BasePath == "" {
+ o.BasePath = defaultBasePath
+ }
+ if o.Transform == nil {
+ o.Transform = defaultTransform
+ }
+ if o.PathPerm == 0 {
+ o.PathPerm = defaultPathPerm
+ }
+ if o.FilePerm == 0 {
+ o.FilePerm = defaultFilePerm
+ }
+
+ d := &Diskv{
+ Options: o,
+ cache: map[string][]byte{},
+ cacheSize: 0,
+ }
+
+ if d.Index != nil && d.IndexLess != nil {
+ d.Index.Initialize(d.IndexLess, d.Keys(nil))
+ }
+
+ return d
+}
+
+// Write synchronously writes the key-value pair to disk, making it immediately
+// available for reads. Write relies on the filesystem to perform an eventual
+// sync to physical media. If you need stronger guarantees, see WriteStream.
+func (d *Diskv) Write(key string, val []byte) error {
+ return d.WriteStream(key, bytes.NewBuffer(val), false)
+}
+
+// WriteStream writes the data represented by the io.Reader to the disk, under
+// the provided key. If sync is true, WriteStream performs an explicit sync on
+// the file as soon as it's written.
+//
+// bytes.Buffer provides io.Reader semantics for basic data types.
+func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
+ if len(key) <= 0 {
+ return errEmptyKey
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ return d.writeStreamWithLock(key, r, sync)
+}
+
+// createKeyFileWithLock either creates the key file directly, or
+// creates a temporary file in TempDir if it is set.
+func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
+ if d.TempDir != "" {
+ if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
+ return nil, fmt.Errorf("temp mkdir: %s", err)
+ }
+ f, err := ioutil.TempFile(d.TempDir, "")
+ if err != nil {
+ return nil, fmt.Errorf("temp file: %s", err)
+ }
+
+ if err := f.Chmod(d.FilePerm); err != nil {
+ f.Close() // error deliberately ignored
+ os.Remove(f.Name()) // error deliberately ignored
+ return nil, fmt.Errorf("chmod: %s", err)
+ }
+ return f, nil
+ }
+
+ mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
+ f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
+ if err != nil {
+ return nil, fmt.Errorf("open file: %s", err)
+ }
+ return f, nil
+}
+
+// writeStreamWithLock does no input validation checking.
+func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
+ if err := d.ensurePathWithLock(key); err != nil {
+ return fmt.Errorf("ensure path: %s", err)
+ }
+
+ f, err := d.createKeyFileWithLock(key)
+ if err != nil {
+ return fmt.Errorf("create key file: %s", err)
+ }
+
+ wc := io.WriteCloser(&nopWriteCloser{f})
+ if d.Compression != nil {
+ wc, err = d.Compression.Writer(f)
+ if err != nil {
+ f.Close() // error deliberately ignored
+ os.Remove(f.Name()) // error deliberately ignored
+ return fmt.Errorf("compression writer: %s", err)
+ }
+ }
+
+ if _, err := io.Copy(wc, r); err != nil {
+ f.Close() // error deliberately ignored
+ os.Remove(f.Name()) // error deliberately ignored
+ return fmt.Errorf("i/o copy: %s", err)
+ }
+
+ if err := wc.Close(); err != nil {
+ f.Close() // error deliberately ignored
+ os.Remove(f.Name()) // error deliberately ignored
+ return fmt.Errorf("compression close: %s", err)
+ }
+
+ if sync {
+ if err := f.Sync(); err != nil {
+ f.Close() // error deliberately ignored
+ os.Remove(f.Name()) // error deliberately ignored
+ return fmt.Errorf("file sync: %s", err)
+ }
+ }
+
+ if err := f.Close(); err != nil {
+ return fmt.Errorf("file close: %s", err)
+ }
+
+ if f.Name() != d.completeFilename(key) {
+ if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
+ os.Remove(f.Name()) // error deliberately ignored
+ return fmt.Errorf("rename: %s", err)
+ }
+ }
+
+ if d.Index != nil {
+ d.Index.Insert(key)
+ }
+
+ d.bustCacheWithLock(key) // cache only on read
+
+ return nil
+}
+
+// Import imports the source file into diskv under the destination key. If the
+// destination key already exists, it's overwritten. If move is true, the
+// source file is removed after a successful import.
+func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
+ if dstKey == "" {
+ return errEmptyKey
+ }
+
+ if fi, err := os.Stat(srcFilename); err != nil {
+ return err
+ } else if fi.IsDir() {
+ return errImportDirectory
+ }
+
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ if err := d.ensurePathWithLock(dstKey); err != nil {
+ return fmt.Errorf("ensure path: %s", err)
+ }
+
+ if move {
+ if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
+ d.bustCacheWithLock(dstKey)
+ return nil
+ } else if err != syscall.EXDEV {
+ // A rename failure other than EXDEV (cross-device link) is fatal;
+ // an EXDEV error falls through to the copy-based import below.
+ return err
+ }
+ }
+
+ f, err := os.Open(srcFilename)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+ err = d.writeStreamWithLock(dstKey, f, false)
+ if err == nil && move {
+ err = os.Remove(srcFilename)
+ }
+ return err
+}
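+
+// A usage sketch (the source path and key are hypothetical):
+//
+//	// Move /tmp/payload.bin into the store under key "payload".
+//	err := d.Import("/tmp/payload.bin", "payload", true)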
+
+// Read reads the key and returns the value.
+// If the key is available in the cache, Read won't touch the disk.
+// If the key is not in the cache, Read will have the side-effect of
+// lazily caching the value.
+func (d *Diskv) Read(key string) ([]byte, error) {
+ rc, err := d.ReadStream(key, false)
+ if err != nil {
+ return []byte{}, err
+ }
+ defer rc.Close()
+ return ioutil.ReadAll(rc)
+}
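+
+// For example (sketch; assumes the key was previously written):
+//
+//	val, err := d.Read("alpha") // subsequent reads may be served from cache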
+
+// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
+// If the value is cached from a previous read, and direct is false,
+// ReadStream will use the cached value. Otherwise, it will return a handle to
+// the file on disk, and cache the data on read.
+//
+// If direct is true, ReadStream will lazily delete any cached value for the
+// key, and return a direct handle to the file on disk.
+//
+// If compression is enabled, ReadStream taps into the io.Reader stream prior
+// to decompression, and caches the compressed data.
+func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
+ d.mu.RLock()
+ defer d.mu.RUnlock()
+
+ if val, ok := d.cache[key]; ok {
+ if !direct {
+ buf := bytes.NewBuffer(val)
+ if d.Compression != nil {
+ return d.Compression.Reader(buf)
+ }
+ return ioutil.NopCloser(buf), nil
+ }
+
+ go func() {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ d.uncacheWithLock(key, uint64(len(val)))
+ }()
+ }
+
+ return d.readWithRLock(key)
+}
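+
+// An illustrative sketch (assumes key "alpha" was previously written):
+//
+//	rc, err := d.ReadStream("alpha", false) // false: cached value is OK
+//	if err == nil {
+//		defer rc.Close()
+//		io.Copy(os.Stdout, rc)
+//	}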
+
+// read ignores the cache, and returns an io.ReadCloser representing the
+// decompressed data for the given key, streamed from the disk. Clients should
+// acquire a read lock on the Diskv and check the cache themselves before
+// calling read.
+func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
+ filename := d.completeFilename(key)
+
+ fi, err := os.Stat(filename)
+ if err != nil {
+ return nil, err
+ }
+ if fi.IsDir() {
+ return nil, os.ErrNotExist
+ }
+
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+
+ var r io.Reader
+ if d.CacheSizeMax > 0 {
+ r = newSiphon(f, d, key)
+ } else {
+ r = &closingReader{f}
+ }
+
+ var rc = io.ReadCloser(ioutil.NopCloser(r))
+ if d.Compression != nil {
+ rc, err = d.Compression.Reader(r)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return rc, nil
+}
+
+// closingReader provides a Reader that automatically closes the wrapped
+// ReadCloser when it reaches EOF.
+type closingReader struct {
+ rc io.ReadCloser
+}
+
+func (cr closingReader) Read(p []byte) (int, error) {
+ n, err := cr.rc.Read(p)
+ if err == io.EOF {
+ if closeErr := cr.rc.Close(); closeErr != nil {
+ return n, closeErr // close must succeed for Read to succeed
+ }
+ }
+ return n, err
+}
+
+// siphon is like a TeeReader: it copies all data read through it to an
+// internal buffer, and moves that buffer to the cache at EOF.
+type siphon struct {
+ f *os.File
+ d *Diskv
+ key string
+ buf *bytes.Buffer
+}
+
+// newSiphon constructs a siphoning reader that wraps the passed file.
+// When a successful series of reads ends in an EOF, the siphon will write
+// the buffered data to Diskv's cache under the given key.
+func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
+ return &siphon{
+ f: f,
+ d: d,
+ key: key,
+ buf: &bytes.Buffer{},
+ }
+}
+
+// Read implements the io.Reader interface for siphon.
+func (s *siphon) Read(p []byte) (int, error) {
+ n, err := s.f.Read(p)
+
+ if err == nil {
+ return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
+ }
+
+ if err == io.EOF {
+ s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
+ if closeErr := s.f.Close(); closeErr != nil {
+ return n, closeErr // close must succeed for Read to succeed
+ }
+ return n, err
+ }
+
+ return n, err
+}
+
+// Erase synchronously erases the given key from the disk and the cache.
+func (d *Diskv) Erase(key string) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ d.bustCacheWithLock(key)
+
+ // erase from index
+ if d.Index != nil {
+ d.Index.Delete(key)
+ }
+
+ // erase from disk
+ filename := d.completeFilename(key)
+ if s, err := os.Stat(filename); err == nil {
+ if s.IsDir() {
+ return errBadKey
+ }
+ if err = os.Remove(filename); err != nil {
+ return err
+ }
+ } else {
+ // Return err as-is so caller can do os.IsNotExist(err).
+ return err
+ }
+
+ // clean up and return
+ d.pruneDirsWithLock(key)
+ return nil
+}
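+
+// A usage sketch; Erase propagates the os.Stat error for missing keys:
+//
+//	err := d.Erase("alpha")
+//	if os.IsNotExist(err) {
+//		// the key was already absent
+//	}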
+
+// EraseAll will delete all of the data from the store, both in the cache and on
+// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
+// diskv-related data. Care should be taken to always specify a diskv base
+// directory that is exclusively for diskv data.
+func (d *Diskv) EraseAll() error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ d.cache = make(map[string][]byte)
+ d.cacheSize = 0
+ if d.TempDir != "" {
+ os.RemoveAll(d.TempDir) // errors ignored
+ }
+ return os.RemoveAll(d.BasePath)
+}
+
+// Has returns true if the given key exists.
+func (d *Diskv) Has(key string) bool {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+
+ if _, ok := d.cache[key]; ok {
+ return true
+ }
+
+ filename := d.completeFilename(key)
+ s, err := os.Stat(filename)
+ if err != nil {
+ return false
+ }
+ if s.IsDir() {
+ return false
+ }
+
+ return true
+}
+
+// Keys returns a channel that will yield every key accessible by the store,
+// in undefined order. If a cancel channel is provided, closing it will
+// terminate and close the keys channel.
+func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
+ return d.KeysPrefix("", cancel)
+}
+
+// KeysPrefix returns a channel that will yield every key accessible by the
+// store with the given prefix, in undefined order. If a cancel channel is
+// provided, closing it will terminate and close the keys channel. If the
+// provided prefix is the empty string, all keys will be yielded.
+func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
+ var prepath string
+ if prefix == "" {
+ prepath = d.BasePath
+ } else {
+ prepath = d.pathFor(prefix)
+ }
+ c := make(chan string)
+ go func() {
+ filepath.Walk(prepath, walker(c, prefix, cancel))
+ close(c)
+ }()
+ return c
+}
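+
+// An illustrative sketch (the "user-" prefix is hypothetical):
+//
+//	cancel := make(chan struct{})
+//	defer close(cancel)
+//	for key := range d.KeysPrefix("user-", cancel) {
+//		fmt.Println(key)
+//	}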
+
+// walker returns a function that satisfies filepath.WalkFunc.
+// It sends every non-directory file entry down the channel c.
+func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
+ return func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+
+ if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
+ return nil // "pass"
+ }
+
+ select {
+ case c <- info.Name():
+ case <-cancel:
+ return errCanceled
+ }
+
+ return nil
+ }
+}
+
+// pathFor returns the absolute path of the location on the filesystem
+// where the data for the given key will be stored.
+func (d *Diskv) pathFor(key string) string {
+ return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
+}
+
+// ensurePathWithLock is a helper function that generates all necessary
+// directories on the filesystem for the given key.
+func (d *Diskv) ensurePathWithLock(key string) error {
+ return os.MkdirAll(d.pathFor(key), d.PathPerm)
+}
+
+// completeFilename returns the absolute path to the file for the given key.
+func (d *Diskv) completeFilename(key string) string {
+ return filepath.Join(d.pathFor(key), key)
+}
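+
+// For illustration: given BasePath "data" and a Transform that maps key
+// "abcdef" to []string{"ab", "cd"}, pathFor returns "data/ab/cd" and
+// completeFilename returns "data/ab/cd/abcdef".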
+
+// cacheWithLock attempts to cache the given key-value pair in the store's
+// cache. It can fail if the value is larger than the cache's maximum size.
+func (d *Diskv) cacheWithLock(key string, val []byte) error {
+ valueSize := uint64(len(val))
+ if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
+ return fmt.Errorf("%s; not caching", err)
+ }
+
+ // be very strict about memory guarantees
+ if (d.cacheSize + valueSize) > d.CacheSizeMax {
+ panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
+ }
+
+ d.cache[key] = val
+ d.cacheSize += valueSize
+ return nil
+}
+
+// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
+func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
+ d.mu.Lock()
+ defer d.mu.Unlock()
+ return d.cacheWithLock(key, val)
+}
+
+func (d *Diskv) bustCacheWithLock(key string) {
+ if val, ok := d.cache[key]; ok {
+ d.uncacheWithLock(key, uint64(len(val)))
+ }
+}
+
+func (d *Diskv) uncacheWithLock(key string, sz uint64) {
+ d.cacheSize -= sz
+ delete(d.cache, key)
+}
+
+// pruneDirsWithLock deletes empty directories in the path walk leading to
+// the given key. Typically this function is called after an Erase.
+func (d *Diskv) pruneDirsWithLock(key string) error {
+ pathlist := d.Transform(key)
+ for i := range pathlist {
+ dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
+
+ // thanks to Steven Blenkinsop for this snippet
+ switch fi, err := os.Stat(dir); true {
+ case err != nil:
+ return err
+ case !fi.IsDir():
+ panic(fmt.Sprintf("corrupt dirstate at %s", dir))
+ }
+
+ nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
+ if err != nil {
+ return err
+ } else if len(nlinks) > 0 {
+ return nil // directory is not empty -- do not prune
+ }
+ if err = os.Remove(dir); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
+// until the cache has at least valueSize bytes available.
+func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
+ if valueSize > d.CacheSizeMax {
+ return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
+ }
+
+ safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
+
+ for key, val := range d.cache {
+ if safe() {
+ break
+ }
+
+ d.uncacheWithLock(key, uint64(len(val)))
+ }
+
+ if !safe() {
+ panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
+ }
+
+ return nil
+}
+
+// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
+// satisfy the io.WriteCloser interface.
+type nopWriteCloser struct {
+ io.Writer
+}
+
+func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
+func (wc *nopWriteCloser) Close() error { return nil }
diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go
new file mode 100644
index 00000000..96fee515
--- /dev/null
+++ b/vendor/github.com/peterbourgon/diskv/index.go
@@ -0,0 +1,115 @@
+package diskv
+
+import (
+ "sync"
+
+ "github.com/google/btree"
+)
+
+// Index is a generic interface for things that can
+// provide an ordered list of keys.
+type Index interface {
+ Initialize(less LessFunction, keys <-chan string)
+ Insert(key string)
+ Delete(key string)
+ Keys(from string, n int) []string
+}
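+
+// An ordered index is wired in through Options; an illustrative sketch
+// (the base path is hypothetical):
+//
+//	d := diskv.New(diskv.Options{
+//		BasePath:  "indexed-data",
+//		Index:     &diskv.BTreeIndex{},
+//		IndexLess: func(a, b string) bool { return a < b },
+//	})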
+
+// LessFunction is used to initialize an Index of keys in a specific order.
+type LessFunction func(string, string) bool
+
+// btreeString is a custom data type that satisfies the BTree Less interface,
+// making the strings it wraps sortable by the BTree package.
+type btreeString struct {
+ s string
+ l LessFunction
+}
+
+// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
+func (s btreeString) Less(i btree.Item) bool {
+ return s.l(s.s, i.(btreeString).s)
+}
+
+// BTreeIndex is an implementation of the Index interface using google/btree.
+type BTreeIndex struct {
+ sync.RWMutex
+ LessFunction
+ *btree.BTree
+}
+
+// Initialize populates the B-tree with data from the keys channel,
+// according to the passed less function. It's destructive to the BTreeIndex.
+func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
+ i.Lock()
+ defer i.Unlock()
+ i.LessFunction = less
+ i.BTree = rebuild(less, keys)
+}
+
+// Insert inserts the given key (only) into the B-tree.
+func (i *BTreeIndex) Insert(key string) {
+ i.Lock()
+ defer i.Unlock()
+ if i.BTree == nil || i.LessFunction == nil {
+ panic("uninitialized index")
+ }
+ i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
+}
+
+// Delete removes the given key (only) from the B-tree.
+func (i *BTreeIndex) Delete(key string) {
+ i.Lock()
+ defer i.Unlock()
+ if i.BTree == nil || i.LessFunction == nil {
+ panic("uninitialized index")
+ }
+ i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
+}
+
+// Keys yields a maximum of n keys in order. If the passed 'from' key is
+// empty, Keys will return the first n keys. If the passed 'from' key is
+// non-empty and present in the index, the first key in the returned slice
+// will be the key that immediately follows it, in key order; a 'from' key
+// that is not in the index is treated as empty.
+func (i *BTreeIndex) Keys(from string, n int) []string {
+ i.RLock()
+ defer i.RUnlock()
+
+ if i.BTree == nil || i.LessFunction == nil {
+ panic("uninitialized index")
+ }
+
+ if i.BTree.Len() <= 0 {
+ return []string{}
+ }
+
+ btreeFrom := btreeString{s: from, l: i.LessFunction}
+ skipFirst := true
+ if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
+ // empty 'from', or no such key: fabricate an always-smallest item
+ // so that iteration starts from the first key
+ btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
+ skipFirst = false
+ }
+
+ keys := []string{}
+ iterator := func(i btree.Item) bool {
+ keys = append(keys, i.(btreeString).s)
+ return len(keys) < n
+ }
+ i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
+
+ if skipFirst && len(keys) > 0 {
+ keys = keys[1:]
+ }
+
+ return keys
+}
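+
+// A paging sketch, assuming an initialized index idx:
+//
+//	page := idx.Keys("", 10) // first 10 keys, in order
+//	for len(page) > 0 {
+//		last := page[len(page)-1]
+//		page = idx.Keys(last, 10) // next 10 keys after 'last'
+//	}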
+
+// rebuild does the work of regenerating the index
+// with the given keys.
+func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
+ tree := btree.New(2)
+ for key := range keys {
+ tree.ReplaceOrInsert(btreeString{s: key, l: less})
+ }
+ return tree
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 457c4f32..0f08d54c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -203,6 +203,8 @@ github.com/golang/protobuf/ptypes/duration
github.com/golang/protobuf/ptypes/timestamp
# github.com/golang/snappy v0.0.3
github.com/golang/snappy
+# github.com/google/btree v1.0.0
+github.com/google/btree
# github.com/google/go-containerregistry v0.6.0
## explicit
github.com/google/go-containerregistry/internal/and
@@ -399,6 +401,9 @@ github.com/otiai10/copy
# github.com/pelletier/go-toml v1.9.4
## explicit
github.com/pelletier/go-toml
+# github.com/peterbourgon/diskv v2.0.1+incompatible
+## explicit
+github.com/peterbourgon/diskv
# github.com/philopon/go-toposort v0.0.0-20170620085441-9be86dbd762f
## explicit
github.com/philopon/go-toposort