Vendor after merging c/image#536

... which adds blob info caching

Signed-off-by: Miloslav Trmač <mitr@redhat.com>

parent bfc0c5e531
commit bcf3dbbb93
@@ -8,6 +8,7 @@ import (
 	"github.com/containers/image/directory"
 	"github.com/containers/image/image"
+	"github.com/containers/image/pkg/blobinfocache"
 	"github.com/containers/image/types"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
@@ -32,6 +33,7 @@ var layersCmd = cli.Command{
 	if err != nil {
 		return err
 	}
+	cache := blobinfocache.DefaultCache(sys)
 	rawSource, err := parseImageSource(ctx, c, c.Args()[0])
 	if err != nil {
 		return err
@@ -101,11 +103,11 @@ var layersCmd = cli.Command{
 	}()

 	for _, bd := range blobDigests {
-		r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1})
+		r, blobSize, err := rawSource.GetBlob(ctx, types.BlobInfo{Digest: bd.digest, Size: -1}, cache)
 		if err != nil {
 			return err
 		}
-		if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, bd.isConfig); err != nil {
+		if _, err := dest.PutBlob(ctx, r, types.BlobInfo{Digest: bd.digest, Size: blobSize}, cache, bd.isConfig); err != nil {
 			if closeErr := r.Close(); closeErr != nil {
 				return errors.Wrapf(err, " (close error: %v)", closeErr)
 			}
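The change in a nutshell: `GetBlob` and `PutBlob` now take a `types.BlobInfoCache` argument, obtained from `blobinfocache.DefaultCache`. A minimal sketch of the new call pattern, assuming the c/image API as of this commit (the `copyBlob` helper and its parameters are illustrative, not part of skopeo):

```go
package example

import (
	"context"

	"github.com/containers/image/pkg/blobinfocache"
	"github.com/containers/image/types"
	"github.com/opencontainers/go-digest"
)

// copyBlob copies a single blob from src to dest, passing the same
// BlobInfoCache to both the read and the write so c/image can record
// blob info for later reuse.
func copyBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, dest types.ImageDestination, d digest.Digest) error {
	cache := blobinfocache.DefaultCache(sys) // sys may be nil for defaults
	r, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: -1}, cache)
	if err != nil {
		return err
	}
	defer r.Close()
	_, err = dest.PutBlob(ctx, r, types.BlobInfo{Digest: d, Size: size}, cache, false)
	return err
}
```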
@@ -13,6 +13,7 @@ github.com/pmezard/go-difflib master
 github.com/pkg/errors master
 golang.org/x/crypto master
 github.com/ulikunitz/xz v0.5.4
+github.com/boltdb/bolt master
 # docker deps from https://github.com/docker/docker/blob/v1.11.2/hack/vendor.sh
 github.com/docker/docker da99009bbb1165d1ac5688b5c81d2f589d418341
 github.com/docker/go-connections 7beb39f0b969b075d1325fecb092faf27fd357b6
vendor/github.com/boltdb/bolt/LICENSE (generated, vendored, new file, 20 lines)
@@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Ben Johnson

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
vendor/github.com/boltdb/bolt/README.md (generated, vendored, new file, 935 lines)
@@ -0,0 +1,935 @@
Bolt [Coverage Status](https://coveralls.io/r/boltdb/bolt?branch=master) [GoDoc](https://godoc.org/github.com/boltdb/bolt)
====

Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL.

Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.

[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
## Project Status

Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.
## A message from the author

> The original goal of Bolt was to provide a simple pure Go key/value store and to
> not bloat the code with extraneous features. To that end, the project has been
> a success. However, this limited scope also means that the project is complete.
>
> Maintaining an open source database requires an immense amount of time and energy.
> Changes to the code can have unintended and sometimes catastrophic effects so
> even simple changes require hours and hours of careful testing and validation.
>
> Unfortunately I no longer have the time or energy to continue this work. Bolt is
> in a stable state and has years of successful production use. As such, I feel that
> leaving it in its current state is the most prudent course of action.
>
> If you are interested in using a more featureful version of Bolt, I suggest that
> you look at the CoreOS fork called [bbolt](https://github.com/coreos/bbolt).

- Ben Johnson ([@benbjohnson](https://twitter.com/benbjohnson))
## Table of Contents

- [Getting Started](#getting-started)
  - [Installing](#installing)
  - [Opening a database](#opening-a-database)
  - [Transactions](#transactions)
    - [Read-write transactions](#read-write-transactions)
    - [Read-only transactions](#read-only-transactions)
    - [Batch read-write transactions](#batch-read-write-transactions)
    - [Managing transactions manually](#managing-transactions-manually)
  - [Using buckets](#using-buckets)
  - [Using key/value pairs](#using-keyvalue-pairs)
  - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
  - [Iterating over keys](#iterating-over-keys)
    - [Prefix scans](#prefix-scans)
    - [Range scans](#range-scans)
    - [ForEach()](#foreach)
  - [Nested buckets](#nested-buckets)
  - [Database backups](#database-backups)
  - [Statistics](#statistics)
  - [Read-Only Mode](#read-only-mode)
  - [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
  - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
  - [LevelDB, RocksDB](#leveldb-rocksdb)
  - [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)
## Getting Started

### Installing

To start using Bolt, install Go and run `go get`:

```sh
$ go get github.com/boltdb/bolt/...
```

This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.

### Opening a database

The top-level object in Bolt is a `DB`. It is represented as a single file on
your disk and represents a consistent snapshot of your data.

To open your database, simply use the `bolt.Open()` function:

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open the my.db data file in your current directory.
	// It will be created if it doesn't exist.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	...
}
```

Please note that Bolt obtains a file lock on the data file so multiple processes
cannot open the same database at the same time. Opening an already open Bolt
database will cause it to hang until the other process closes it. To prevent
an indefinite wait you can pass a timeout option to the `Open()` function:

```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
```

### Transactions

Bolt allows only one read-write transaction at a time but allows as many
read-only transactions as you want at a time. Each transaction has a consistent
view of the data as it existed when the transaction started.

Individual transactions and all objects created from them (e.g. buckets, keys)
are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating a transaction from the `DB` is thread safe.

Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.

#### Read-write transactions

To start a read-write transaction, you can use the `DB.Update()` function:

```go
err := db.Update(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Inside the closure, you have a consistent view of the database. You commit the
transaction by returning `nil` at the end. You can also rollback the transaction
at any point by returning an error. All database operations are allowed inside
a read-write transaction.

Always check the return error as it will report any disk failures that can cause
your transaction to not complete. If you return an error within your closure
it will be passed through.

#### Read-only transactions

To start a read-only transaction, you can use the `DB.View()` function:

```go
err := db.View(func(tx *bolt.Tx) error {
	...
	return nil
})
```

You also get a consistent view of the database within this closure; however,
no mutating operations are allowed within a read-only transaction. You can only
retrieve buckets, retrieve values, and copy the database within a read-only
transaction.

#### Batch read-write transactions

Each `DB.Update()` waits for disk to commit the writes. This overhead
can be minimized by combining multiple updates with the `DB.Batch()`
function:

```go
err := db.Batch(func(tx *bolt.Tx) error {
	...
	return nil
})
```

Concurrent Batch calls are opportunistically combined into larger
transactions. Batch is only useful when there are multiple goroutines
calling it.

The trade-off is that `Batch` can call the given
function multiple times, if parts of the transaction fail. The
function must be idempotent and side effects must take effect only
after a successful return from `DB.Batch()`.

For example: don't display messages from inside the function, instead
set variables in the enclosing scope:

```go
var id uint64
err := db.Batch(func(tx *bolt.Tx) error {
	// Find last key in bucket, decode as bigendian uint64, increment
	// by one, encode back to []byte, and add new key.
	...
	id = newValue
	return nil
})
if err != nil {
	return ...
}
fmt.Printf("Allocated ID %d\n", id)
```

#### Managing transactions manually

The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
function. These helper functions will start the transaction, execute a function,
and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.

However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.

```go
// Start a writable transaction.
tx, err := db.Begin(true)
if err != nil {
	return err
}
defer tx.Rollback()

// Use the transaction...
_, err = tx.CreateBucket([]byte("MyBucket"))
if err != nil {
	return err
}

// Commit the transaction and check for error.
if err := tx.Commit(); err != nil {
	return err
}
```

The first argument to `DB.Begin()` is a boolean stating if the transaction
should be writable.

### Using buckets

Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
function:

```go
db.Update(func(tx *bolt.Tx) error {
	_, err := tx.CreateBucket([]byte("MyBucket"))
	if err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	return nil
})
```

You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
guarantee that they exist for future transactions, as sketched below.
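A minimal sketch of that pattern, assuming an already opened `db` (the bucket names "users" and "events" are illustrative):

```go
// Ensure all top-level buckets exist before doing any other work.
err := db.Update(func(tx *bolt.Tx) error {
	for _, name := range []string{"users", "events"} {
		if _, err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
			return fmt.Errorf("create bucket %s: %s", name, err)
		}
	}
	return nil
})
if err != nil {
	log.Fatal(err)
}
```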

To delete a bucket, simply call the `Tx.DeleteBucket()` function.

### Using key/value pairs

To save a key/value pair to a bucket, use the `Bucket.Put()` function:

```go
db.Update(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	err := b.Put([]byte("answer"), []byte("42"))
	return err
})
```

This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
bucket. To retrieve this value, we can use the `Bucket.Get()` function:

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	v := b.Get([]byte("answer"))
	fmt.Printf("The answer is: %s\n", v)
	return nil
})
```

The `Get()` function does not return an error because its operation is
guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key, which is different from the key not existing.

Use the `Bucket.Delete()` function to delete a key from the bucket.

Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice, as in the
sketch below.

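A minimal sketch of copying a value out of a transaction (the bucket and key names are the illustrative ones from above):

```go
var answer []byte
err := db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	if v != nil {
		answer = make([]byte, len(v))
		copy(answer, v) // answer remains valid after the transaction closes
	}
	return nil
})
if err != nil {
	log.Fatal(err)
}
fmt.Printf("The answer is: %s\n", answer)
```
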
### Autoincrementing integer for the bucket

By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.

```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
	return s.db.Update(func(tx *bolt.Tx) error {
		// Retrieve the users bucket.
		// This should be created when the DB is first opened.
		b := tx.Bucket([]byte("users"))

		// Generate ID for the user.
		// This returns an error only if the Tx is closed or not writeable.
		// That can't happen in an Update() call so I ignore the error check.
		id, _ := b.NextSequence()
		u.ID = int(id)

		// Marshal user data into bytes.
		buf, err := json.Marshal(u)
		if err != nil {
			return err
		}

		// Persist bytes to users bucket.
		return b.Put(itob(u.ID), buf)
	})
}

// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
	b := make([]byte, 8)
	binary.BigEndian.PutUint64(b, uint64(v))
	return b
}

type User struct {
	ID int
	...
}
```

### Iterating over keys

Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
iteration over these keys extremely fast. To iterate over keys we'll use a
`Cursor`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	c := b.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

The cursor allows you to move to a specific point in the list of keys and move
forward or backward through the keys one at a time.

The following functions are available on the cursor:

```
First()  Move to the first key.
Last()   Move to the last key.
Seek()   Move to a specific key.
Next()   Move to the next key.
Prev()   Move to the previous key.
```

Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.

During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket, as in the sketch below.

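A minimal sketch of detecting nested buckets during iteration (the "root" bucket name is illustrative):

```go
db.View(func(tx *bolt.Tx) error {
	root := tx.Bucket([]byte("root"))
	c := root.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if v == nil {
			// A nil value means k names a nested bucket.
			sub := root.Bucket(k)
			fmt.Printf("bucket=%s (keys=%d)\n", k, sub.Stats().KeyN)
		} else {
			fmt.Printf("key=%s, value=%s\n", k, v)
		}
	}
	return nil
})
```
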
#### Prefix scans

To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	c := tx.Bucket([]byte("MyBucket")).Cursor()

	prefix := []byte("1234")
	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}

	return nil
})
```

#### Range scans

Another common use case is scanning over a range such as a time range. If you
use a sortable time encoding such as RFC3339 then you can query a specific
date range like this:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume our events bucket exists and has RFC3339 encoded time keys.
	c := tx.Bucket([]byte("Events")).Cursor()

	// Our time range spans the 90's decade.
	min := []byte("1990-01-01T00:00:00Z")
	max := []byte("2000-01-01T00:00:00Z")

	// Iterate over the 90's.
	for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
		fmt.Printf("%s: %s\n", k, v)
	}

	return nil
})
```

Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.

#### ForEach()

You can also use the function `ForEach()` if you know you'll be iterating over
all the keys in a bucket:

```go
db.View(func(tx *bolt.Tx) error {
	// Assume bucket exists and has keys
	b := tx.Bucket([]byte("MyBucket"))

	b.ForEach(func(k, v []byte) error {
		fmt.Printf("key=%s, value=%s\n", k, v)
		return nil
	})
	return nil
})
```

Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.

### Nested buckets

You can also store a bucket in a key to create nested buckets. The API is the
same as the bucket management API on the `DB` object:

```go
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```

Say you had a multi-tenant application where the root-level bucket was the
account bucket. Inside that bucket is one bucket per account, and inside each
account bucket you could have many buckets pertaining to the account itself
(Users, Notes, etc.), isolating the information into logical groupings.

```go
// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
	// Start the transaction.
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	defer tx.Rollback()

	// Retrieve the root bucket for the account.
	// Assume this has already been created when the account was set up.
	root := tx.Bucket([]byte(strconv.Itoa(accountID)))

	// Setup the users bucket.
	bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
	if err != nil {
		return err
	}

	// Generate an ID for the new user.
	userID, err := bkt.NextSequence()
	if err != nil {
		return err
	}
	u.ID = int(userID)

	// Marshal and save the encoded user.
	if buf, err := json.Marshal(u); err != nil {
		return err
	} else if err := bkt.Put([]byte(strconv.Itoa(u.ID)), buf); err != nil {
		return err
	}

	// Commit the transaction.
	if err := tx.Commit(); err != nil {
		return err
	}

	return nil
}
```

### Database backups

Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
function to write a consistent view of the database to a writer. If you call
this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.

By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.

One common use case is to backup over HTTP so you can use tools like `cURL` to
do database backups:

```go
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
	err := db.View(func(tx *bolt.Tx) error {
		w.Header().Set("Content-Type", "application/octet-stream")
		w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
		w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
		_, err := tx.WriteTo(w)
		return err
	})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
	}
}
```

Then you can backup using this command:

```sh
$ curl http://localhost/backup > my.db
```

Or you can open your browser to `http://localhost/backup` and it will download
automatically.

If you want to backup to another file you can use the `Tx.CopyFile()` helper
function, for example:

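A minimal sketch, assuming an already opened `db` and an illustrative destination path:

```go
// Snapshot the live database to backup.db without blocking other
// readers or writers (the copy runs inside a read-only transaction).
err := db.View(func(tx *bolt.Tx) error {
	return tx.CopyFile("backup.db", 0600)
})
if err != nil {
	log.Fatal(err)
}
```
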
### Statistics

The database keeps a running count of many of the internal operations it
performs so you can better understand what's going on. By grabbing a snapshot
of these stats at two points in time we can see what operations were performed
in that time range.

For example, we could start a goroutine to log stats every 10 seconds:

```go
go func() {
	// Grab the initial stats.
	prev := db.Stats()

	for {
		// Wait for 10s.
		time.Sleep(10 * time.Second)

		// Grab the current stats and diff them.
		stats := db.Stats()
		diff := stats.Sub(&prev)

		// Encode stats to JSON and print to STDERR.
		json.NewEncoder(os.Stderr).Encode(diff)

		// Save stats for the next loop.
		prev = stats
	}
}()
```

It's also useful to pipe these stats to a service such as statsd for monitoring
or to provide an HTTP endpoint that will perform a fixed-length sample, as in
the sketch below.

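A minimal sketch of such an endpoint, exposing a point-in-time snapshot of `db.Stats()` as JSON (the `/stats` path is illustrative; a fixed-length sample would diff two snapshots with `Stats.Sub` as above):

```go
http.HandleFunc("/stats", func(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(db.Stats())
})
```
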
### Read-Only Mode

Sometimes it is useful to create a shared, read-only Bolt database. To do this,
set the `Options.ReadOnly` flag when opening your database. Read-only mode
uses a shared lock to allow multiple processes to read from the database but
it will block any processes from opening the database in read-write mode.

```go
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
if err != nil {
	log.Fatal(err)
}
```

### Mobile Use (iOS/Android)

Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB` with an initializing
constructor that takes in a filepath where the database file will be stored.
Neither Android nor iOS require extra permissions or cleanup from using this method.

```go
func NewBoltDB(filepath string) *BoltDB {
	db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}

	return &BoltDB{db}
}

type BoltDB struct {
	db *bolt.DB
	...
}

func (b *BoltDB) Path() string {
	return b.db.Path()
}

func (b *BoltDB) Close() {
	b.db.Close()
}
```

Database logic should be defined as methods on this wrapper struct.

To initialize this struct from the native language (both platforms now sync
their local storage to the cloud, so these snippets disable that functionality
for the database file):

#### Android

```java
String path;
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) {
    path = getNoBackupFilesDir().getAbsolutePath();
} else {
    path = getFilesDir().getAbsolutePath();
}
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path);
```

#### iOS

```objc
- (void)demo {
    NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
                                                          NSUserDomainMask,
                                                          YES) objectAtIndex:0];
    GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
    [self addSkipBackupAttributeToItemAtPath:demo.path];
    // Some DB Logic would go here
    [demo close];
}

- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
{
    NSURL* URL= [NSURL fileURLWithPath: filePathString];
    assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);

    NSError *error = nil;
    BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
                                  forKey: NSURLIsExcludedFromBackupKey error: &error];
    if (!success) {
        NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
    }
    return success;
}
```

## Resources

For more information on getting started with Bolt, check out the following articles:

* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville

## Comparison with other databases

### Postgres, MySQL, & other relational databases

Relational databases structure data into rows and are only accessible through
the use of SQL. This approach provides flexibility in how you store and query
your data but also incurs overhead in parsing and planning SQL statements. Bolt
accesses all data by a byte slice key. This makes Bolt fast to read and write
data by key but provides no built-in support for joining values together.

Most relational databases (with the exception of SQLite) are standalone servers
that run separately from your application. This gives your systems
flexibility to connect multiple application servers to a single database
server but also adds overhead in serializing and transporting data over the
network. Bolt runs as a library included in your application so all data access
has to go through your application's process. This brings data closer to your
application but limits multi-process access to the data.


### LevelDB, RocksDB

LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
they are libraries bundled into the application; however, their underlying
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
random writes by using a write ahead log and multi-tiered, sorted files called
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
have trade-offs.

If you require a high random write throughput (>10,000 w/sec) or you need to use
spinning disks then LevelDB could be a good choice. If your application is
read-heavy or does a lot of range scans then Bolt could be a good choice.

One other important consideration is that LevelDB does not have transactions.
It supports batch writing of key/value pairs and it supports read snapshots
but it will not give you the ability to do a compare-and-swap operation safely.
Bolt supports fully serializable ACID transactions.


### LMDB

Bolt was originally a port of LMDB so it is architecturally similar. Both use
a B+tree, have ACID semantics with fully serializable transactions, and support
lock-free MVCC using a single writer and multiple readers.

The two projects have somewhat diverged. LMDB heavily focuses on raw performance
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
several unsafe actions such as direct writes for the sake of performance. Bolt
opts to disallow actions which can leave the database in a corrupted state. The
only exception to this in Bolt is `DB.NoSync`.

There are also a few differences in API. LMDB requires a maximum mmap size when
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
automatically. LMDB overloads the getter and setter functions with multiple
flags whereas Bolt splits these specialized cases into their own functions.

## Caveats & Limitations
|
||||
|
||||
It's important to pick the right tool for the job and Bolt is no exception.
|
||||
Here are a few things to note when evaluating and using Bolt:
|
||||
|
||||
* Bolt is good for read intensive workloads. Sequential write performance is
|
||||
also fast but random writes can be slow. You can use `DB.Batch()` or add a
|
||||
write-ahead log to help mitigate this issue.
|
||||
|
||||
* Bolt uses a B+tree internally so there can be a lot of random page access.
|
||||
SSDs provide a significant performance boost over spinning disks.
|
||||
|
||||
* Try to avoid long running read transactions. Bolt uses copy-on-write so
|
||||
old pages cannot be reclaimed while an old transaction is using them.
|
||||
|
||||
* Byte slices returned from Bolt are only valid during a transaction. Once the
|
||||
transaction has been committed or rolled back then the memory they point to
|
||||
can be reused by a new page or can be unmapped from virtual memory and you'll
|
||||
see an `unexpected fault address` panic when accessing it.
|
||||
|
||||
* Bolt uses an exclusive write lock on the database file so it cannot be
|
||||
shared by multiple processes.
|
||||
|
||||
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
|
||||
buckets that have random inserts will cause your database to have very poor
|
||||
page utilization.
|
||||
|
||||
* Use larger buckets in general. Smaller buckets causes poor page utilization
|
||||
once they become larger than the page size (typically 4KB).
|
||||
|
||||
* Bulk loading a lot of random writes into a new bucket can be slow as the
|
||||
page will not split until the transaction is committed. Randomly inserting
|
||||
more than 100,000 key/value pairs into a single new bucket in a single
|
||||
transaction is not advised.
|
||||
|
||||
* Bolt uses a memory-mapped file so the underlying operating system handles the
|
||||
caching of the data. Typically, the OS will cache as much of the file as it
|
||||
can in memory and will release memory as needed to other processes. This means
|
||||
that Bolt can show very high memory usage when working with large databases.
|
||||
However, this is expected and the OS will release memory as needed. Bolt can
|
||||
handle databases much larger than the available physical RAM, provided its
|
||||
memory-map fits in the process virtual address space. It may be problematic
|
||||
on 32-bits systems.
|
||||
|
||||
* The data structures in the Bolt database are memory mapped so the data file
|
||||
will be endian specific. This means that you cannot copy a Bolt file from a
|
||||
little endian machine to a big endian machine and have it work. For most
|
||||
users this is not a concern since most modern CPUs are little endian.
|
||||
|
||||
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
|
||||
and return free pages back to the disk. Instead, Bolt maintains a free list
|
||||
of unused pages within its data file. These free pages can be reused by later
|
||||
transactions. This works well for many use cases as databases generally tend
|
||||
to grow. However, it's important to note that deleting large chunks of data
|
||||
will not allow you to reclaim that space on disk.
|
||||
|
||||
For more information on page allocation, [see this comment][page-allocation].
|
||||
|
||||
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
|
||||
|
||||
|
||||
## Reading the Source

Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.

The best places to start are the main entry points into Bolt:

- `Open()` - Initializes the reference to the database. It's responsible for
  creating the database if it doesn't exist, obtaining an exclusive lock on the
  file, reading the meta pages, & memory-mapping the file.

- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
  value of the `writable` argument. This requires briefly obtaining the "meta"
  lock to keep track of open transactions. Only one read-write transaction can
  exist at a time so the "rwlock" is acquired during the life of a read-write
  transaction.

- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
  arguments, a cursor is used to traverse the B+tree to the page and position
  where the key & value will be written. Once the position is found, the bucket
  materializes the underlying page and the page's parent pages into memory as
  "nodes". These nodes are where mutations occur during read-write transactions.
  These changes get flushed to disk during commit.

- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
  to move to the page & position of a key/value pair. During a read-only
  transaction, the key and value data is returned as a direct reference to the
  underlying mmap file so there's no allocation overhead. For read-write
  transactions, this data may reference the mmap file or one of the in-memory
  node values.

- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
  or in-memory nodes. It can seek to a specific key, move to the first or last
  value, or it can move forward or backward. The cursor handles the movement up
  and down the B+tree transparently to the end user.

- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
  into pages to be written to disk. Writing to disk then occurs in two phases.
  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
  new meta page with an incremented transaction ID is written and another
  `fsync()` occurs. This two phase write ensures that partially written data
  pages are ignored in the event of a crash since the meta page pointing to them
  is never written. Partially written meta pages are invalidated because they
  are written with a checksum.

If you have additional notes that could be helpful for others, please submit
them via pull request.

## Other Projects Using Bolt

Below is a list of public, open source projects that use Bolt:

* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, with a JSON over HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service.
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang with BoltDB for persistent key/value storage; for routing it uses the high-performance HTTPRouter.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains.
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB files in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies.
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB.
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.

If you are using Bolt in a project please send a pull request to add it to the list.
vendor/github.com/boltdb/bolt/bolt_386.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
vendor/github.com/boltdb/bolt/bolt_amd64.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
vendor/github.com/boltdb/bolt/bolt_arm.go (generated, vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
package bolt

import "unsafe"

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool

func init() {
	// Simple check to see whether this arch handles unaligned load/stores
	// correctly.

	// ARM9 and older devices require load/stores to be from/to aligned
	// addresses. If not, the lower 2 bits are cleared and that address is
	// read in a jumbled up order.

	// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html

	raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
	val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))

	brokenUnaligned = val != 0x11222211
}
vendor/github.com/boltdb/bolt/bolt_arm64.go (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// +build arm64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
vendor/github.com/boltdb/bolt/bolt_linux.go (generated, vendored, new file, 10 lines)
@@ -0,0 +1,10 @@
package bolt

import (
	"syscall"
)

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return syscall.Fdatasync(int(db.file.Fd()))
}
vendor/github.com/boltdb/bolt/bolt_openbsd.go (generated, vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
package bolt

import (
	"syscall"
	"unsafe"
)

const (
	msAsync      = 1 << iota // perform asynchronous writes
	msSync                   // perform synchronous writes
	msInvalidate             // invalidate cached data
)

func msync(db *DB) error {
	_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
	if errno != 0 {
		return errno
	}
	return nil
}

func fdatasync(db *DB) error {
	if db.data != nil {
		return msync(db)
	}
	return db.file.Sync()
}
vendor/github.com/boltdb/bolt/bolt_ppc.go (generated, vendored, new file, 9 lines)
@@ -0,0 +1,9 @@
// +build ppc

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
vendor/github.com/boltdb/bolt/bolt_ppc64.go (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// +build ppc64

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
vendor/github.com/boltdb/bolt/bolt_ppc64le.go (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// +build ppc64le

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
vendor/github.com/boltdb/bolt/bolt_s390x.go (generated, vendored, new file, 12 lines)
@@ -0,0 +1,12 @@
// +build s390x

package bolt

// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB

// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF

// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false
vendor/github.com/boltdb/bolt/bolt_unix.go (generated, vendored, new file, 89 lines)
@@ -0,0 +1,89 @@
// +build !windows,!plan9,!solaris

package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		flag := syscall.LOCK_SH
		if exclusive {
			flag = syscall.LOCK_EX
		}

		// Otherwise attempt to obtain an exclusive lock.
		err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
		if err == nil {
			return nil
		} else if err != syscall.EWOULDBLOCK {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := syscall.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}

// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
	_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
	if e1 != 0 {
		err = e1
	}
	return
}
vendor/github.com/boltdb/bolt/bolt_unix_solaris.go (generated, vendored, new file, 90 lines)
@@ -0,0 +1,90 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"

	"golang.org/x/sys/unix"
)

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}
		var lock syscall.Flock_t
		lock.Start = 0
		lock.Len = 0
		lock.Pid = 0
		lock.Whence = 0
		lock.Pid = 0
		if exclusive {
			lock.Type = syscall.F_WRLCK
		} else {
			lock.Type = syscall.F_RDLCK
		}
		err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
		if err == nil {
			return nil
		} else if err != syscall.EAGAIN {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	var lock syscall.Flock_t
	lock.Start = 0
	lock.Len = 0
	lock.Type = syscall.F_UNLCK
	lock.Whence = 0
	return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}

// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
	// Map the data file to memory.
	b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
	if err != nil {
		return err
	}

	// Advise the kernel that the mmap is accessed randomly.
	if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
		return fmt.Errorf("madvise: %s", err)
	}

	// Save the original byte slice and convert to a byte array pointer.
	db.dataref = b
	db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
	db.datasz = sz
	return nil
}

// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
	// Ignore the unmap if we have no mapped data.
	if db.dataref == nil {
		return nil
	}

	// Unmap using the original byte slice.
	err := unix.Munmap(db.dataref)
	db.dataref = nil
	db.data = nil
	db.datasz = 0
	return err
}
144
vendor/github.com/boltdb/bolt/bolt_windows.go
generated
vendored
Normal file
@ -0,0 +1,144 @@
package bolt

import (
	"fmt"
	"os"
	"syscall"
	"time"
	"unsafe"
)

// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
	modkernel32      = syscall.NewLazyDLL("kernel32.dll")
	procLockFileEx   = modkernel32.NewProc("LockFileEx")
	procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)

const (
	lockExt = ".lock"

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
	flagLockExclusive       = 2
	flagLockFailImmediately = 1

	// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
	errLockViolation syscall.Errno = 0x21
)

func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
	if r == 0 {
		return err
	}
	return nil
}

func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
	r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
	if r == 0 {
		return err
	}
	return nil
}

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}

// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
	// Create a separate lock file on windows because a process
	// cannot share an exclusive lock on the same file. This is
	// needed during Tx.WriteTo().
	f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
	if err != nil {
		return err
	}
	db.lockfile = f

	var t time.Time
	for {
		// If we're beyond our timeout then return an error.
		// This can only occur after we've attempted a flock once.
		if t.IsZero() {
			t = time.Now()
		} else if timeout > 0 && time.Since(t) > timeout {
			return ErrTimeout
		}

		var flag uint32 = flagLockFailImmediately
		if exclusive {
			flag |= flagLockExclusive
		}

		err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
		if err == nil {
			return nil
		} else if err != errLockViolation {
			return err
		}

		// Wait for a bit and try again.
		time.Sleep(50 * time.Millisecond)
	}
}

// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
	err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
	db.lockfile.Close()
	os.Remove(db.path + lockExt)
	return err
}

// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
	if !db.readOnly {
		// Truncate the database to the size of the mmap.
		if err := db.file.Truncate(int64(sz)); err != nil {
			return fmt.Errorf("truncate: %s", err)
		}
	}

	// Open a file mapping handle. CreateFileMapping takes the maximum size
	// split into its high and low 32-bit halves.
	sizehi := uint32(sz >> 32)
	sizelo := uint32(sz) & 0xffffffff
	h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
	if h == 0 {
		return os.NewSyscallError("CreateFileMapping", errno)
	}

	// Create the memory map.
	addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
	if addr == 0 {
		return os.NewSyscallError("MapViewOfFile", errno)
	}

	// Close mapping handle.
	if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
		return os.NewSyscallError("CloseHandle", err)
	}

	// Convert to a byte array.
	db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
	db.datasz = sz

	return nil
}

// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
	if db.data == nil {
		return nil
	}

	addr := (uintptr)(unsafe.Pointer(&db.data[0]))
	if err := syscall.UnmapViewOfFile(addr); err != nil {
		return os.NewSyscallError("UnmapViewOfFile", err)
	}
	return nil
}
8
vendor/github.com/boltdb/bolt/boltsync_unix.go
generated
vendored
Normal file
@ -0,0 +1,8 @@
// +build !windows,!plan9,!linux,!openbsd

package bolt

// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
	return db.file.Sync()
}
777
vendor/github.com/boltdb/bolt/bucket.go
generated
vendored
Normal file
@ -0,0 +1,777 @@
package bolt

import (
	"bytes"
	"fmt"
	"unsafe"
)

const (
	// MaxKeySize is the maximum length of a key, in bytes.
	MaxKeySize = 32768

	// MaxValueSize is the maximum length of a value, in bytes.
	MaxValueSize = (1 << 31) - 2
)

const (
	maxUint = ^uint(0)
	minUint = 0
	maxInt  = int(^uint(0) >> 1)
	minInt  = -maxInt - 1
)

const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))

const (
	minFillPercent = 0.1
	maxFillPercent = 1.0
)

// DefaultFillPercent is the percentage that split pages are filled.
// This value can be changed by setting Bucket.FillPercent.
const DefaultFillPercent = 0.5

// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
	*bucket
	tx       *Tx                // the associated transaction
	buckets  map[string]*Bucket // subbucket cache
	page     *page              // inline page reference
	rootNode *node              // materialized node for the root page.
	nodes    map[pgid]*node     // node cache

	// Sets the threshold for filling nodes when they split. By default,
	// the bucket will fill to 50% but it can be useful to increase this
	// amount if you know that your write workloads are mostly append-only.
	//
	// This is non-persisted across transactions so it must be set in every Tx.
	FillPercent float64
}

// bucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type bucket struct {
	root     pgid   // page id of the bucket's root-level page
	sequence uint64 // monotonically incrementing, used by NextSequence()
}

// newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
	var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
	if tx.writable {
		b.buckets = make(map[string]*Bucket)
		b.nodes = make(map[pgid]*node)
	}
	return b
}

// Tx returns the tx of the bucket.
func (b *Bucket) Tx() *Tx {
	return b.tx
}

// Root returns the root of the bucket.
func (b *Bucket) Root() pgid {
	return b.root
}

// Writable returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
	return b.tx.writable
}

// Cursor creates a cursor associated with the bucket.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
	// Update transaction statistics.
	b.tx.stats.CursorCount++

	// Allocate and return a cursor.
	return &Cursor{
		bucket: b,
		stack:  make([]elemRef, 0),
	}
}

// Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
	if b.buckets != nil {
		if child := b.buckets[string(name)]; child != nil {
			return child
		}
	}

	// Move cursor to key.
	c := b.Cursor()
	k, v, flags := c.seek(name)

	// Return nil if the key doesn't exist or it is not a bucket.
	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
		return nil
	}

	// Otherwise create a bucket and cache it.
	var child = b.openBucket(v)
	if b.buckets != nil {
		b.buckets[string(name)] = child
	}

	return child
}

// Helper method that re-interprets a sub-bucket value
// from a parent into a Bucket
func (b *Bucket) openBucket(value []byte) *Bucket {
	var child = newBucket(b.tx)

	// If unaligned load/stores are broken on this arch and value is
	// unaligned simply clone to an aligned byte array.
	unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0

	if unaligned {
		value = cloneBytes(value)
	}

	// If this is a writable transaction then we need to copy the bucket entry.
	// Read-only transactions can point directly at the mmap entry.
	if b.tx.writable && !unaligned {
		child.bucket = &bucket{}
		*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
	} else {
		child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
	}

	// Save a reference to the inline page if the bucket is inline.
	if child.root == 0 {
		child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	}

	return &child
}

// CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
	if b.tx.db == nil {
		return nil, ErrTxClosed
	} else if !b.tx.writable {
		return nil, ErrTxNotWritable
	} else if len(key) == 0 {
		return nil, ErrBucketNameRequired
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if there is an existing key.
	if bytes.Equal(key, k) {
		if (flags & bucketLeafFlag) != 0 {
			return nil, ErrBucketExists
		}
		return nil, ErrIncompatibleValue
	}

	// Create empty, inline bucket.
	var bucket = Bucket{
		bucket:      &bucket{},
		rootNode:    &node{isLeaf: true},
		FillPercent: DefaultFillPercent,
	}
	var value = bucket.write()

	// Insert into node.
	key = cloneBytes(key)
	c.node().put(key, key, value, 0, bucketLeafFlag)

	// Since subbuckets are not allowed on inline buckets, we need to
	// dereference the inline page, if it exists. This will cause the bucket
	// to be treated as a regular, non-inline bucket for the rest of the tx.
	b.page = nil

	return b.Bucket(key), nil
}

// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
	child, err := b.CreateBucket(key)
	if err == ErrBucketExists {
		return b.Bucket(key), nil
	} else if err != nil {
		return nil, err
	}
	return child, nil
}

// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if bucket doesn't exist or is not a bucket.
	if !bytes.Equal(key, k) {
		return ErrBucketNotFound
	} else if (flags & bucketLeafFlag) == 0 {
		return ErrIncompatibleValue
	}

	// Recursively delete all child buckets.
	child := b.Bucket(key)
	err := child.ForEach(func(k, v []byte) error {
		if v == nil {
			if err := child.DeleteBucket(k); err != nil {
				return fmt.Errorf("delete bucket: %s", err)
			}
		}
		return nil
	})
	if err != nil {
		return err
	}

	// Remove cached copy.
	delete(b.buckets, string(key))

	// Release all bucket pages to freelist.
	child.nodes = nil
	child.rootNode = nil
	child.free()

	// Delete the node if we have a matching key.
	c.node().del(key)

	return nil
}

// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
	k, v, flags := b.Cursor().seek(key)

	// Return nil if this is a bucket.
	if (flags & bucketLeafFlag) != 0 {
		return nil
	}

	// If our target node isn't the same key as what's passed in then return nil.
	if !bytes.Equal(key, k) {
		return nil
	}
	return v
}

// Put sets the value for a key in the bucket.
// If the key exists then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	} else if len(key) == 0 {
		return ErrKeyRequired
	} else if len(key) > MaxKeySize {
		return ErrKeyTooLarge
	} else if int64(len(value)) > MaxValueSize {
		return ErrValueTooLarge
	}

	// Move cursor to correct position.
	c := b.Cursor()
	k, _, flags := c.seek(key)

	// Return an error if there is an existing key with a bucket value.
	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}

	// Insert into node.
	key = cloneBytes(key)
	c.node().put(key, key, value, 0, 0)

	return nil
}

// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Move cursor to correct position.
	c := b.Cursor()
	_, _, flags := c.seek(key)

	// Return an error if there is already existing bucket value.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}

	// Delete the node if we have a matching key.
	c.node().del(key)

	return nil
}

// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }

// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
	if b.tx.db == nil {
		return ErrTxClosed
	} else if !b.Writable() {
		return ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
	}

	// Set the sequence.
	b.bucket.sequence = v
	return nil
}

// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
	if b.tx.db == nil {
		return 0, ErrTxClosed
	} else if !b.Writable() {
		return 0, ErrTxNotWritable
	}

	// Materialize the root node if it hasn't been already so that the
	// bucket will be saved during commit.
	if b.rootNode == nil {
		_ = b.node(b.root, nil)
	}

	// Increment and return the sequence.
	b.bucket.sequence++
	return b.bucket.sequence, nil
}

// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
	if b.tx.db == nil {
		return ErrTxClosed
	}
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if err := fn(k, v); err != nil {
			return err
		}
	}
	return nil
}

// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
	var s, subStats BucketStats
	pageSize := b.tx.db.pageSize
	s.BucketN += 1
	if b.root == 0 {
		s.InlineBucketN += 1
	}
	b.forEachPage(func(p *page, depth int) {
		if (p.flags & leafPageFlag) != 0 {
			s.KeyN += int(p.count)

			// used totals the used bytes for the page
			used := pageHeaderSize

			if p.count != 0 {
				// If page has any elements, add all element headers.
				used += leafPageElementSize * int(p.count-1)

				// Add all element key, value sizes.
				// The computation takes advantage of the fact that the position
				// of the last element's key/value equals to the total of the sizes
				// of all previous elements' keys and values.
				// It also includes the last element's header.
				lastElement := p.leafPageElement(p.count - 1)
				used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
			}

			if b.root == 0 {
				// For inlined bucket just update the inline stats
				s.InlineBucketInuse += used
			} else {
				// For non-inlined bucket update all the leaf stats
				s.LeafPageN++
				s.LeafInuse += used
				s.LeafOverflowN += int(p.overflow)

				// Collect stats from sub-buckets.
				// Do that by iterating over all element headers
				// looking for the ones with the bucketLeafFlag.
				for i := uint16(0); i < p.count; i++ {
					e := p.leafPageElement(i)
					if (e.flags & bucketLeafFlag) != 0 {
						// For any bucket element, open the element value
						// and recursively call Stats on the contained bucket.
						subStats.Add(b.openBucket(e.value()).Stats())
					}
				}
			}
		} else if (p.flags & branchPageFlag) != 0 {
			s.BranchPageN++
			lastElement := p.branchPageElement(p.count - 1)

			// used totals the used bytes for the page
			// Add header and all element headers.
			used := pageHeaderSize + (branchPageElementSize * int(p.count-1))

			// Add size of all keys and values.
			// Again, use the fact that last element's position equals to
			// the total of key, value sizes of all previous elements.
			used += int(lastElement.pos + lastElement.ksize)
			s.BranchInuse += used
			s.BranchOverflowN += int(p.overflow)
		}

		// Keep track of maximum page depth.
		if depth+1 > s.Depth {
			s.Depth = (depth + 1)
		}
	})

	// Alloc stats can be computed from page counts and pageSize.
	s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
	s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize

	// Add the max depth of sub-buckets to get total nested depth.
	s.Depth += subStats.Depth
	// Add the stats for all sub-buckets
	s.Add(subStats)
	return s
}

// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
	// If we have an inline page then just use that.
	if b.page != nil {
		fn(b.page, 0)
		return
	}

	// Otherwise traverse the page hierarchy.
	b.tx.forEachPage(b.root, 0, fn)
}

// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
	// If we have an inline page or root node then just use that.
	if b.page != nil {
		fn(b.page, nil, 0)
		return
	}
	b._forEachPageNode(b.root, 0, fn)
}

func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
	var p, n = b.pageNode(pgid)

	// Execute function.
	fn(p, n, depth)

	// Recursively loop over children.
	if p != nil {
		if (p.flags & branchPageFlag) != 0 {
			for i := 0; i < int(p.count); i++ {
				elem := p.branchPageElement(uint16(i))
				b._forEachPageNode(elem.pgid, depth+1, fn)
			}
		}
	} else {
		if !n.isLeaf {
			for _, inode := range n.inodes {
				b._forEachPageNode(inode.pgid, depth+1, fn)
			}
		}
	}
}

// spill writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
	// Spill all child buckets first.
	for name, child := range b.buckets {
		// If the child bucket is small enough and it has no child buckets then
		// write it inline into the parent bucket's page. Otherwise spill it
		// like a normal bucket and make the parent value a pointer to the page.
		var value []byte
		if child.inlineable() {
			child.free()
			value = child.write()
		} else {
			if err := child.spill(); err != nil {
				return err
			}

			// Update the child bucket header in this bucket.
			value = make([]byte, unsafe.Sizeof(bucket{}))
			var bucket = (*bucket)(unsafe.Pointer(&value[0]))
			*bucket = *child.bucket
		}

		// Skip writing the bucket if there are no materialized nodes.
		if child.rootNode == nil {
			continue
		}

		// Update parent node.
		var c = b.Cursor()
		k, _, flags := c.seek([]byte(name))
		if !bytes.Equal([]byte(name), k) {
			panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
		}
		if flags&bucketLeafFlag == 0 {
			panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
		}
		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
	}

	// Ignore if there's not a materialized root node.
	if b.rootNode == nil {
		return nil
	}

	// Spill nodes.
	if err := b.rootNode.spill(); err != nil {
		return err
	}
	b.rootNode = b.rootNode.root()

	// Update the root node for this bucket.
	if b.rootNode.pgid >= b.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
	}
	b.root = b.rootNode.pgid

	return nil
}

// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
	var n = b.rootNode

	// Bucket must only contain a single leaf node.
	if n == nil || !n.isLeaf {
		return false
	}

	// Bucket is not inlineable if it contains subbuckets or if it goes beyond
	// our threshold for inline bucket size.
	var size = pageHeaderSize
	for _, inode := range n.inodes {
		size += leafPageElementSize + len(inode.key) + len(inode.value)

		if inode.flags&bucketLeafFlag != 0 {
			return false
		} else if size > b.maxInlineBucketSize() {
			return false
		}
	}

	return true
}

// Returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
	return b.tx.db.pageSize / 4
}

// write allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
	// Allocate the appropriate size.
	var n = b.rootNode
	var value = make([]byte, bucketHeaderSize+n.size())

	// Write a bucket header.
	var bucket = (*bucket)(unsafe.Pointer(&value[0]))
	*bucket = *b.bucket

	// Convert byte slice to a fake page and write the root node.
	var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
	n.write(p)

	return value
}

// rebalance attempts to balance all nodes.
func (b *Bucket) rebalance() {
	for _, n := range b.nodes {
		n.rebalance()
	}
	for _, child := range b.buckets {
		child.rebalance()
	}
}

// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
	_assert(b.nodes != nil, "nodes map expected")

	// Retrieve node if it's already been created.
	if n := b.nodes[pgid]; n != nil {
		return n
	}

	// Otherwise create a node and cache it.
	n := &node{bucket: b, parent: parent}
	if parent == nil {
		b.rootNode = n
	} else {
		parent.children = append(parent.children, n)
	}

	// Use the inline page if this is an inline bucket.
	var p = b.page
	if p == nil {
		p = b.tx.page(pgid)
	}

	// Read the page into the node and cache it.
	n.read(p)
	b.nodes[pgid] = n

	// Update statistics.
	b.tx.stats.NodeCount++

	return n
}

// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
	if b.root == 0 {
		return
	}

	var tx = b.tx
	b.forEachPageNode(func(p *page, n *node, _ int) {
		if p != nil {
			tx.db.freelist.free(tx.meta.txid, p)
		} else {
			n.free()
		}
	})
	b.root = 0
}

// dereference removes all references to the old mmap.
func (b *Bucket) dereference() {
	if b.rootNode != nil {
		b.rootNode.root().dereference()
	}

	for _, child := range b.buckets {
		child.dereference()
	}
}

// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
	// Inline buckets have a fake page embedded in their value so treat them
	// differently. We'll return the rootNode (if available) or the fake page.
	if b.root == 0 {
		if id != 0 {
			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
		}
		if b.rootNode != nil {
			return nil, b.rootNode
		}
		return b.page, nil
	}

	// Check the node cache for non-inline buckets.
	if b.nodes != nil {
		if n := b.nodes[id]; n != nil {
			return nil, n
		}
	}

	// Finally lookup the page from the transaction if no node is materialized.
	return b.tx.page(id), nil
}

// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
	// Page count statistics.
	BranchPageN     int // number of logical branch pages
	BranchOverflowN int // number of physical branch overflow pages
	LeafPageN       int // number of logical leaf pages
	LeafOverflowN   int // number of physical leaf overflow pages

	// Tree statistics.
	KeyN  int // number of key/value pairs
	Depth int // number of levels in B+tree

	// Page size utilization.
	BranchAlloc int // bytes allocated for physical branch pages
	BranchInuse int // bytes actually used for branch data
	LeafAlloc   int // bytes allocated for physical leaf pages
	LeafInuse   int // bytes actually used for leaf data

	// Bucket statistics
	BucketN           int // total number of buckets including the top bucket
	InlineBucketN     int // total number of inlined buckets
	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}

func (s *BucketStats) Add(other BucketStats) {
	s.BranchPageN += other.BranchPageN
	s.BranchOverflowN += other.BranchOverflowN
	s.LeafPageN += other.LeafPageN
	s.LeafOverflowN += other.LeafOverflowN
	s.KeyN += other.KeyN
	if s.Depth < other.Depth {
		s.Depth = other.Depth
	}
	s.BranchAlloc += other.BranchAlloc
	s.BranchInuse += other.BranchInuse
	s.LeafAlloc += other.LeafAlloc
	s.LeafInuse += other.LeafInuse

	s.BucketN += other.BucketN
	s.InlineBucketN += other.InlineBucketN
	s.InlineBucketInuse += other.InlineBucketInuse
}

// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
	var clone = make([]byte, len(v))
	copy(clone, v)
	return clone
}
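
Bucket.ForEach above is the simplest way to walk a bucket from client code. As a minimal, self-contained sketch of using it inside a read-only transaction (the database path "my.db" and bucket name "widgets" are arbitrary placeholders, not anything defined by this package):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

// dumpBucket prints every key/value pair in a bucket in sorted key order.
func dumpBucket(db *bolt.DB, name []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(name)
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		// Nested buckets are reported with a nil value, matching the docs above.
		return b.ForEach(func(k, v []byte) error {
			fmt.Printf("%s = %s\n", k, v)
			return nil
		})
	})
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := dumpBucket(db, []byte("widgets")); err != nil {
		log.Fatal(err)
	}
}
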
400
vendor/github.com/boltdb/bolt/cursor.go
generated
vendored
Normal file
@ -0,0 +1,400 @@
package bolt

import (
	"bytes"
	"fmt"
	"sort"
)

// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
	bucket *Bucket
	stack  []elemRef
}

// Bucket returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
	return c.bucket
}

// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	c.first()

	// If we land on an empty page then move to the next value.
	// https://github.com/boltdb/bolt/issues/450
	if c.stack[len(c.stack)-1].count() == 0 {
		c.next()
	}

	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	c.stack = c.stack[:0]
	p, n := c.bucket.pageNode(c.bucket.root)
	ref := elemRef{page: p, node: n}
	ref.index = ref.count() - 1
	c.stack = append(c.stack, ref)
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")
	k, v, flags := c.next()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Attempt to move back one element until we're successful.
	// Move up the stack as we hit the beginning of each page in our stack.
	for i := len(c.stack) - 1; i >= 0; i-- {
		elem := &c.stack[i]
		if elem.index > 0 {
			elem.index--
			break
		}
		c.stack = c.stack[:i]
	}

	// If we've hit the end then return nil.
	if len(c.stack) == 0 {
		return nil, nil
	}

	// Move down the stack to find the last element of the last leaf under this branch.
	c.last()
	k, v, flags := c.keyValue()
	if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
	k, v, flags := c.seek(seek)

	// If we ended up after the last element of a page then move to the next one.
	if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
		k, v, flags = c.next()
	}

	if k == nil {
		return nil, nil
	} else if (flags & uint32(bucketLeafFlag)) != 0 {
		return k, nil
	}
	return k, v
}

// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
func (c *Cursor) Delete() error {
	if c.bucket.tx.db == nil {
		return ErrTxClosed
	} else if !c.bucket.Writable() {
		return ErrTxNotWritable
	}

	key, _, flags := c.keyValue()
	// Return an error if current value is a bucket.
	if (flags & bucketLeafFlag) != 0 {
		return ErrIncompatibleValue
	}
	c.node().del(key)

	return nil
}

// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
	_assert(c.bucket.tx.db != nil, "tx closed")

	// Start from root page/node and traverse to correct page.
	c.stack = c.stack[:0]
	c.search(seek, c.bucket.root)
	ref := &c.stack[len(c.stack)-1]

	// If the cursor is pointing to the end of page/node then return nil.
	if ref.index >= ref.count() {
		return nil, nil, 0
	}

	// If this is a bucket then return a nil value.
	return c.keyValue()
}

// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() {
	for {
		// Exit when we hit a leaf page.
		var ref = &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the first element to the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)
		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
	}
}

// last moves the cursor to the last leaf element under the last page in the stack.
func (c *Cursor) last() {
	for {
		// Exit when we hit a leaf page.
		ref := &c.stack[len(c.stack)-1]
		if ref.isLeaf() {
			break
		}

		// Keep adding pages pointing to the last element in the stack.
		var pgid pgid
		if ref.node != nil {
			pgid = ref.node.inodes[ref.index].pgid
		} else {
			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
		}
		p, n := c.bucket.pageNode(pgid)

		var nextRef = elemRef{page: p, node: n}
		nextRef.index = nextRef.count() - 1
		c.stack = append(c.stack, nextRef)
	}
}

// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
	for {
		// Attempt to move over one element until we're successful.
		// Move up the stack as we hit the end of each page in our stack.
		var i int
		for i = len(c.stack) - 1; i >= 0; i-- {
			elem := &c.stack[i]
			if elem.index < elem.count()-1 {
				elem.index++
				break
			}
		}

		// If we've hit the root page then stop and return. This will leave the
		// cursor on the last element of the last page.
		if i == -1 {
			return nil, nil, 0
		}

		// Otherwise start from where we left off in the stack and find the
		// first element of the first leaf page.
		c.stack = c.stack[:i+1]
		c.first()

		// If this is an empty page then restart and move back up the stack.
		// https://github.com/boltdb/bolt/issues/450
		if c.stack[len(c.stack)-1].count() == 0 {
			continue
		}

		return c.keyValue()
	}
}

// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
	p, n := c.bucket.pageNode(pgid)
	if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
		panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
	}
	e := elemRef{page: p, node: n}
	c.stack = append(c.stack, e)

	// If we're on a leaf page/node then find the specific node.
	if e.isLeaf() {
		c.nsearch(key)
		return
	}

	if n != nil {
		c.searchNode(key, n)
		return
	}
	c.searchPage(key, p)
}

func (c *Cursor) searchNode(key []byte, n *node) {
	var exact bool
	index := sort.Search(len(n.inodes), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(n.inodes[i].key, key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, n.inodes[index].pgid)
}

func (c *Cursor) searchPage(key []byte, p *page) {
	// Binary search for the correct range.
	inodes := p.branchPageElements()

	var exact bool
	index := sort.Search(int(p.count), func(i int) bool {
		// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
		// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
		ret := bytes.Compare(inodes[i].key(), key)
		if ret == 0 {
			exact = true
		}
		return ret != -1
	})
	if !exact && index > 0 {
		index--
	}
	c.stack[len(c.stack)-1].index = index

	// Recursively search to the next page.
	c.search(key, inodes[index].pgid)
}

// nsearch searches the leaf node on the top of the stack for a key.
func (c *Cursor) nsearch(key []byte) {
	e := &c.stack[len(c.stack)-1]
	p, n := e.page, e.node

	// If we have a node then search its inodes.
	if n != nil {
		index := sort.Search(len(n.inodes), func(i int) bool {
			return bytes.Compare(n.inodes[i].key, key) != -1
		})
		e.index = index
		return
	}

	// If we have a page then search its leaf elements.
	inodes := p.leafPageElements()
	index := sort.Search(int(p.count), func(i int) bool {
		return bytes.Compare(inodes[i].key(), key) != -1
	})
	e.index = index
}

// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
	ref := &c.stack[len(c.stack)-1]
	if ref.count() == 0 || ref.index >= ref.count() {
		return nil, nil, 0
	}

	// Retrieve value from node.
	if ref.node != nil {
		inode := &ref.node.inodes[ref.index]
		return inode.key, inode.value, inode.flags
	}

	// Or retrieve value from page.
	elem := ref.page.leafPageElement(uint16(ref.index))
	return elem.key(), elem.value(), elem.flags
}

// node returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")

	// If the top of the stack is a leaf node then just return it.
	if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
		return ref.node
	}

	// Start from root and traverse down the hierarchy.
	var n = c.stack[0].node
	if n == nil {
		n = c.bucket.node(c.stack[0].page.id, nil)
	}
	for _, ref := range c.stack[:len(c.stack)-1] {
		_assert(!n.isLeaf, "expected branch node")
		n = n.childAt(int(ref.index))
	}
	_assert(n.isLeaf, "expected leaf node")
	return n
}

// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
	page  *page
	node  *node
	index int
}

// isLeaf returns whether the ref is pointing at a leaf page/node.
func (r *elemRef) isLeaf() bool {
	if r.node != nil {
		return r.node.isLeaf
	}
	return (r.page.flags & leafPageFlag) != 0
}

// count returns the number of inodes or page elements.
func (r *elemRef) count() int {
	if r.node != nil {
		return len(r.node.inodes)
	}
	return int(r.page.count)
}
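
Seek combined with Next is the standard pattern for prefix scans over a bucket with the Cursor API above. A minimal, self-contained sketch (database path, bucket name, and prefix are placeholders):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

// scanPrefix walks all keys in the bucket that start with prefix, using Seek
// to jump to the first candidate and Next to continue in sorted key order.
func scanPrefix(db *bolt.DB, bucket, prefix []byte) error {
	return db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return bolt.ErrBucketNotFound
		}
		c := b.Cursor()
		for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	})
}

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	if err := scanPrefix(db, []byte("widgets"), []byte("ans")); err != nil {
		log.Fatal(err)
	}
}
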
1037
vendor/github.com/boltdb/bolt/db.go
generated
vendored
Normal file
File diff suppressed because it is too large
44
vendor/github.com/boltdb/bolt/doc.go
generated
vendored
Normal file
@ -0,0 +1,44 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.

Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.

The design of Bolt is based on Howard Chu's LMDB database project.

Bolt currently works on Windows, Mac OS X, and Linux.


Basics

There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.

Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.


Caveats

The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database; however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.

Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bolt
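
The Basics and Caveats sections above translate directly into a few lines of client code. A minimal sketch of the workflow they describe (the file name "my.db" and bucket name "widgets" are arbitrary placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open (or create) the database; it is a single file on disk.
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Read-write transactions go through Update; only one runs at a time.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read-only transactions go through View and may run concurrently.
	err = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("answer"))
		// Per the Caveats above, v is only valid inside this transaction.
		fmt.Printf("answer = %s\n", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
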
71
vendor/github.com/boltdb/bolt/errors.go
generated
vendored
Normal file
@ -0,0 +1,71 @@
package bolt

import "errors"

// These errors can be returned when opening or calling methods on a DB.
var (
	// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
	// is opened or after it is closed.
	ErrDatabaseNotOpen = errors.New("database not open")

	// ErrDatabaseOpen is returned when opening a database that is
	// already open.
	ErrDatabaseOpen = errors.New("database already open")

	// ErrInvalid is returned when both meta pages on a database are invalid.
	// This typically occurs when a file is not a bolt database.
	ErrInvalid = errors.New("invalid database")

	// ErrVersionMismatch is returned when the data file was created with a
	// different version of Bolt.
	ErrVersionMismatch = errors.New("version mismatch")

	// ErrChecksum is returned when either meta page checksum does not match.
	ErrChecksum = errors.New("checksum error")

	// ErrTimeout is returned when a database cannot obtain an exclusive lock
	// on the data file after the timeout passed to Open().
	ErrTimeout = errors.New("timeout")
)

// These errors can occur when beginning or committing a Tx.
var (
	// ErrTxNotWritable is returned when performing a write operation on a
	// read-only transaction.
	ErrTxNotWritable = errors.New("tx not writable")

	// ErrTxClosed is returned when committing or rolling back a transaction
	// that has already been committed or rolled back.
	ErrTxClosed = errors.New("tx closed")

	// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
	// read-only database.
	ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)

// These errors can occur when putting or deleting a value or a bucket.
var (
	// ErrBucketNotFound is returned when trying to access a bucket that has
	// not been created yet.
	ErrBucketNotFound = errors.New("bucket not found")

	// ErrBucketExists is returned when creating a bucket that already exists.
	ErrBucketExists = errors.New("bucket already exists")

	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
	ErrBucketNameRequired = errors.New("bucket name required")

	// ErrKeyRequired is returned when inserting a zero-length key.
	ErrKeyRequired = errors.New("key required")

	// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
	ErrKeyTooLarge = errors.New("key too large")

	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
	ErrValueTooLarge = errors.New("value too large")

	// ErrIncompatibleValue is returned when trying to create or delete a bucket
	// on an existing non-bucket key or when trying to create or delete a
	// non-bucket key on an existing bucket key.
	ErrIncompatibleValue = errors.New("incompatible value")
)
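
ErrTimeout above is only reachable when Open is given a non-zero Timeout option, which feeds the flock loops shown earlier. A minimal sketch of handling it (path and duration are placeholders):

package main

import (
	"log"
	"time"

	"github.com/boltdb/bolt"
)

func main() {
	// With a non-zero Timeout, Open gives up waiting for the file lock
	// instead of blocking forever while another process holds the database.
	db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
	if err == bolt.ErrTimeout {
		log.Fatal("database is locked by another process")
	} else if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
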
252
vendor/github.com/boltdb/bolt/freelist.go
generated
vendored
Normal file
@ -0,0 +1,252 @@
|
||||
package bolt
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// freelist represents a list of all pages that are available for allocation.
|
||||
// It also tracks pages that have been freed but are still in use by open transactions.
|
||||
type freelist struct {
|
||||
ids []pgid // all free and available free page ids.
|
||||
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
|
||||
cache map[pgid]bool // fast lookup of all free and pending page ids.
|
||||
}
|
||||
|
||||
// newFreelist returns an empty, initialized freelist.
|
||||
func newFreelist() *freelist {
|
||||
return &freelist{
|
||||
pending: make(map[txid][]pgid),
|
||||
cache: make(map[pgid]bool),
|
||||
}
|
||||
}
|
||||
|
||||
// size returns the size of the page after serialization.
|
||||
func (f *freelist) size() int {
|
||||
n := f.count()
|
||||
if n >= 0xFFFF {
|
||||
// The first element will be used to store the count. See freelist.write.
|
||||
n++
|
||||
}
|
||||
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
|
||||
}
|
||||
|
||||
// count returns count of pages on the freelist
|
||||
func (f *freelist) count() int {
|
||||
return f.free_count() + f.pending_count()
|
||||
}
|
||||
|
||||
// free_count returns count of free pages
|
||||
func (f *freelist) free_count() int {
|
||||
return len(f.ids)
|
||||
}
|
||||
|
||||
// pending_count returns count of pending pages
|
||||
func (f *freelist) pending_count() int {
|
||||
var count int
|
||||
for _, list := range f.pending {
|
||||
count += len(list)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
|
||||
// f.count returns the minimum length required for dst.
|
||||
func (f *freelist) copyall(dst []pgid) {
|
||||
m := make(pgids, 0, f.pending_count())
|
||||
for _, list := range f.pending {
|
||||
m = append(m, list...)
|
||||
}
|
||||
sort.Sort(m)
|
||||
mergepgids(dst, f.ids, m)
|
||||
}
|
||||
|
||||
// allocate returns the starting page id of a contiguous list of pages of a given size.
|
||||
// If a contiguous block cannot be found then 0 is returned.
|
||||
func (f *freelist) allocate(n int) pgid {
|
||||
if len(f.ids) == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
var initial, previd pgid
|
||||
for i, id := range f.ids {
|
||||
if id <= 1 {
|
||||
panic(fmt.Sprintf("invalid page allocation: %d", id))
|
||||
}
|
||||
|
||||
// Reset initial page if this is not contiguous.
|
||||
if previd == 0 || id-previd != 1 {
|
||||
initial = id
|
||||
}
|
||||
|
||||
// If we found a contiguous block then remove it and return it.
|
||||
if (id-initial)+1 == pgid(n) {
|
||||
// If we're allocating off the beginning then take the fast path
|
||||
// and just adjust the existing slice. This will use extra memory
|
||||
// temporarily but the append() in free() will realloc the slice
|
||||
// as is necessary.
|
||||
if (i + 1) == n {
|
||||
f.ids = f.ids[i+1:]
|
||||
} else {
|
||||
copy(f.ids[i-n+1:], f.ids[i+1:])
|
||||
f.ids = f.ids[:len(f.ids)-n]
|
||||
}
|
||||
|
||||
// Remove from the free cache.
|
||||
for i := pgid(0); i < pgid(n); i++ {
|
||||
delete(f.cache, initial+i)
|
||||
}
|
||||
|
||||
return initial
|
||||
}
|
||||
|
||||
previd = id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// free releases a page and its overflow for a given transaction id.
|
||||
// If the page is already free then a panic will occur.
|
||||
func (f *freelist) free(txid txid, p *page) {
|
||||
if p.id <= 1 {
|
||||
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
|
||||
}
|
||||
|
||||
// Free page and all its overflow pages.
|
||||
var ids = f.pending[txid]
|
||||
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
|
||||
// Verify that page is not already free.
|
||||
if f.cache[id] {
|
||||
panic(fmt.Sprintf("page %d already freed", id))
|
||||
}
|
||||
|
||||
// Add to the freelist and cache.
|
||||
ids = append(ids, id)
|
||||
f.cache[id] = true
|
||||
}
|
||||
f.pending[txid] = ids
|
||||
}
|
||||
|
||||
// release moves all page ids for a transaction id (or older) to the freelist.
|
||||
func (f *freelist) release(txid txid) {
|
||||
m := make(pgids, 0)
|
||||
for tid, ids := range f.pending {
|
||||
if tid <= txid {
|
||||
// Move transaction's pending pages to the available freelist.
|
||||
// Don't remove from the cache since the page is still free.
|
||||
m = append(m, ids...)
|
||||
delete(f.pending, tid)
|
||||
}
|
||||
}
|
||||
sort.Sort(m)
|
||||
f.ids = pgids(f.ids).merge(m)
|
||||
}
|
||||
|
||||
// rollback removes the pages from a given pending tx.
|
||||
func (f *freelist) rollback(txid txid) {
|
||||
// Remove page ids from cache.
|
||||
for _, id := range f.pending[txid] {
|
||||
delete(f.cache, id)
|
||||
}
|
||||
|
||||
// Remove pages from pending list.
|
||||
delete(f.pending, txid)
|
||||
}
|
||||
|
||||
// freed returns whether a given page is in the free list.
|
||||
func (f *freelist) freed(pgid pgid) bool {
|
||||
return f.cache[pgid]
|
||||
}
|
||||
|
||||
// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
	// If the page.count is at the max uint16 value (64k) then it's considered
	// an overflow and the size of the freelist is stored as the first element.
	idx, count := 0, int(p.count)
	if count == 0xFFFF {
		idx = 1
		count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
	}

	// Copy the list of page ids from the freelist.
	if count == 0 {
		f.ids = nil
	} else {
		ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
		f.ids = make([]pgid, len(ids))
		copy(f.ids, ids)

		// Make sure they're sorted.
		sort.Sort(pgids(f.ids))
	}

	// Rebuild the page cache.
	f.reindex()
}

// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
	// Combine the old free pgids and pgids waiting on an open transaction.

	// Update the header flag.
	p.flags |= freelistPageFlag

	// The page.count can only hold up to 64k elements so if we overflow that
	// number then we handle it by putting the size in the first element.
	lenids := f.count()
	if lenids == 0 {
		p.count = uint16(lenids)
	} else if lenids < 0xFFFF {
		p.count = uint16(lenids)
		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
	} else {
		p.count = 0xFFFF
		((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
		f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
	}

	return nil
}

// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
	f.read(p)

	// Build a cache of only pending pages.
	pcache := make(map[pgid]bool)
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			pcache[pendingID] = true
		}
	}

	// Check each page in the freelist and build a new available freelist
	// with any pages not in the pending lists.
	var a []pgid
	for _, id := range f.ids {
		if !pcache[id] {
			a = append(a, id)
		}
	}
	f.ids = a

	// Once the available list is rebuilt then rebuild the free cache so that
	// it includes the available and pending free pages.
	f.reindex()
}

// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
	f.cache = make(map[pgid]bool, len(f.ids))
	for _, id := range f.ids {
		f.cache[id] = true
	}
	for _, pendingIDs := range f.pending {
		for _, pendingID := range pendingIDs {
			f.cache[pendingID] = true
		}
	}
}
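A brief aside, not part of the vendored file: the read/write pair above encodes the list length in a 16-bit page header, spilling the count into the first list element when it would not fit. A hedged sketch of just that encoding decision on plain slices; encodeCount and decodeCount are invented names for illustration, not bolt's on-disk layout code:

package main

import "fmt"

// encodeCount shows the header trick used by freelist.write above: a
// 16-bit count field cannot represent >= 0xFFFF ids, so the count is
// stored as the first element instead and the header is set to 0xFFFF.
func encodeCount(ids []uint64) (header uint16, body []uint64) {
	if len(ids) < 0xFFFF {
		return uint16(len(ids)), ids
	}
	return 0xFFFF, append([]uint64{uint64(len(ids))}, ids...)
}

// decodeCount inverts encodeCount, as freelist.read does.
func decodeCount(header uint16, body []uint64) []uint64 {
	if header == 0xFFFF {
		n := int(body[0])
		return body[1 : 1+n]
	}
	return body[:header]
}

func main() {
	h, b := encodeCount([]uint64{10, 11, 12})
	fmt.Println(h, decodeCount(h, b)) // prints: 3 [10 11 12]
}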
604 vendor/github.com/boltdb/bolt/node.go generated vendored Normal file
@@ -0,0 +1,604 @@
package bolt

import (
	"bytes"
	"fmt"
	"sort"
	"unsafe"
)

// node represents an in-memory, deserialized page.
type node struct {
	bucket     *Bucket
	isLeaf     bool
	unbalanced bool
	spilled    bool
	key        []byte
	pgid       pgid
	parent     *node
	children   nodes
	inodes     inodes
}

// root returns the top-level node this node is attached to.
func (n *node) root() *node {
	if n.parent == nil {
		return n
	}
	return n.parent.root()
}

// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
	if n.isLeaf {
		return 1
	}
	return 2
}

// size returns the size of the node after serialization.
func (n *node) size() int {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
	}
	return sz
}

// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
	sz, elsz := pageHeaderSize, n.pageElementSize()
	for i := 0; i < len(n.inodes); i++ {
		item := &n.inodes[i]
		sz += elsz + len(item.key) + len(item.value)
		if sz >= v {
			return false
		}
	}
	return true
}

// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() int {
	if n.isLeaf {
		return leafPageElementSize
	}
	return branchPageElementSize
}

// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
	if n.isLeaf {
		panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
	}
	return n.bucket.node(n.inodes[index].pgid, n)
}

// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
	return index
}

// numChildren returns the number of children.
func (n *node) numChildren() int {
	return len(n.inodes)
}

// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index >= n.parent.numChildren()-1 {
		return nil
	}
	return n.parent.childAt(index + 1)
}

// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
	if n.parent == nil {
		return nil
	}
	index := n.parent.childIndex(n)
	if index == 0 {
		return nil
	}
	return n.parent.childAt(index - 1)
}

// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
	if pgid >= n.bucket.tx.meta.pgid {
		panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
	} else if len(oldKey) <= 0 {
		panic("put: zero-length old key")
	} else if len(newKey) <= 0 {
		panic("put: zero-length new key")
	}

	// Find insertion index.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })

	// Add capacity and shift nodes if we don't have an exact match and need to insert.
	exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
	if !exact {
		n.inodes = append(n.inodes, inode{})
		copy(n.inodes[index+1:], n.inodes[index:])
	}

	inode := &n.inodes[index]
	inode.flags = flags
	inode.key = newKey
	inode.value = value
	inode.pgid = pgid
	_assert(len(inode.key) > 0, "put: zero-length inode key")
}

// del removes a key from the node.
func (n *node) del(key []byte) {
	// Find index of key.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })

	// Exit if the key isn't found.
	if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
		return
	}

	// Delete inode from the node.
	n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)

	// Mark the node as needing rebalancing.
	n.unbalanced = true
}

// read initializes the node from a page.
func (n *node) read(p *page) {
	n.pgid = p.id
	n.isLeaf = ((p.flags & leafPageFlag) != 0)
	n.inodes = make(inodes, int(p.count))

	for i := 0; i < int(p.count); i++ {
		inode := &n.inodes[i]
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			inode.flags = elem.flags
			inode.key = elem.key()
			inode.value = elem.value()
		} else {
			elem := p.branchPageElement(uint16(i))
			inode.pgid = elem.pgid
			inode.key = elem.key()
		}
		_assert(len(inode.key) > 0, "read: zero-length inode key")
	}

	// Save first key so we can find the node in the parent when we spill.
	if len(n.inodes) > 0 {
		n.key = n.inodes[0].key
		_assert(len(n.key) > 0, "read: zero-length node key")
	} else {
		n.key = nil
	}
}

// write writes the items onto one or more pages.
func (n *node) write(p *page) {
	// Initialize page.
	if n.isLeaf {
		p.flags |= leafPageFlag
	} else {
		p.flags |= branchPageFlag
	}

	if len(n.inodes) >= 0xFFFF {
		panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
	}
	p.count = uint16(len(n.inodes))

	// Stop here if there are no items to write.
	if p.count == 0 {
		return
	}

	// Loop over each item and write it to the page.
	b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
	for i, item := range n.inodes {
		_assert(len(item.key) > 0, "write: zero-length inode key")

		// Write the page element.
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.flags = item.flags
			elem.ksize = uint32(len(item.key))
			elem.vsize = uint32(len(item.value))
		} else {
			elem := p.branchPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
			elem.ksize = uint32(len(item.key))
			elem.pgid = item.pgid
			_assert(elem.pgid != p.id, "write: circular dependency occurred")
		}

		// If the length of key+value is larger than the max allocation size
		// then we need to reallocate the byte array pointer.
		//
		// See: https://github.com/boltdb/bolt/pull/335
		klen, vlen := len(item.key), len(item.value)
		if len(b) < klen+vlen {
			b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
		}

		// Write data for the element to the end of the page.
		copy(b[0:], item.key)
		b = b[klen:]
		copy(b[0:], item.value)
		b = b[vlen:]
	}

	// DEBUG ONLY: n.dump()
}

// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
	var nodes []*node

	node := n
	for {
		// Split node into two.
		a, b := node.splitTwo(pageSize)
		nodes = append(nodes, a)

		// If we can't split then exit the loop.
		if b == nil {
			break
		}

		// Set node to b so it gets split on the next iteration.
		node = b
	}

	return nodes
}

// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
	// Ignore the split if the page doesn't have at least enough nodes for
	// two pages or if the nodes can fit in a single page.
	if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
		return n, nil
	}

	// Determine the threshold before starting a new node.
	var fillPercent = n.bucket.FillPercent
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	threshold := int(float64(pageSize) * fillPercent)

	// Determine split position and sizes of the two pages.
	splitIndex, _ := n.splitIndex(threshold)

	// Split node into two separate nodes.
	// If there's no parent then we'll need to create one.
	if n.parent == nil {
		n.parent = &node{bucket: n.bucket, children: []*node{n}}
	}

	// Create a new node and add it to the parent.
	next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
	n.parent.children = append(n.parent.children, next)

	// Split inodes across two nodes.
	next.inodes = n.inodes[splitIndex:]
	n.inodes = n.inodes[:splitIndex]

	// Update the statistics.
	n.bucket.tx.stats.Split++

	return n, next
}

// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This is only called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
	sz = pageHeaderSize

	// Loop until we only have the minimum number of keys required for the second page.
	for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
		index = i
		inode := n.inodes[i]
		elsize := n.pageElementSize() + len(inode.key) + len(inode.value)

		// If we have at least the minimum number of keys and adding another
		// node would put us over the threshold then exit and return.
		if i >= minKeysPerPage && sz+elsize > threshold {
			break
		}

		// Add the element size to the total size.
		sz += elsize
	}

	return
}

// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
	var tx = n.bucket.tx
	if n.spilled {
		return nil
	}

	// Spill child nodes first. Child nodes can materialize sibling nodes in
	// the case of split-merge so we cannot use a range loop. We have to check
	// the children size on every loop iteration.
	sort.Sort(n.children)
	for i := 0; i < len(n.children); i++ {
		if err := n.children[i].spill(); err != nil {
			return err
		}
	}

	// We no longer need the child list because it's only used for spill tracking.
	n.children = nil

	// Split nodes into appropriate sizes. The first node will always be n.
	var nodes = n.split(tx.db.pageSize)
	for _, node := range nodes {
		// Add node's page to the freelist if it's not new.
		if node.pgid > 0 {
			tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
			node.pgid = 0
		}

		// Allocate contiguous space for the node.
		p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
		if err != nil {
			return err
		}

		// Write the node.
		if p.id >= tx.meta.pgid {
			panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
		}
		node.pgid = p.id
		node.write(p)
		node.spilled = true

		// Insert into parent inodes.
		if node.parent != nil {
			var key = node.key
			if key == nil {
				key = node.inodes[0].key
			}

			node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
			node.key = node.inodes[0].key
			_assert(len(node.key) > 0, "spill: zero-length node key")
		}

		// Update the statistics.
		tx.stats.Spill++
	}

	// If the root node split and created a new root then we need to spill that
	// as well. We'll clear out the children to make sure it doesn't try to respill.
	if n.parent != nil && n.parent.pgid == 0 {
		n.children = nil
		return n.parent.spill()
	}

	return nil
}

// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
	if !n.unbalanced {
		return
	}
	n.unbalanced = false

	// Update statistics.
	n.bucket.tx.stats.Rebalance++

	// Ignore if node is above threshold (25%) and has enough keys.
	var threshold = n.bucket.tx.db.pageSize / 4
	if n.size() > threshold && len(n.inodes) > n.minKeys() {
		return
	}

	// Root node has special handling.
	if n.parent == nil {
		// If root node is a branch and only has one node then collapse it.
		if !n.isLeaf && len(n.inodes) == 1 {
			// Move root's child up.
			child := n.bucket.node(n.inodes[0].pgid, n)
			n.isLeaf = child.isLeaf
			n.inodes = child.inodes[:]
			n.children = child.children

			// Reparent all child nodes being moved.
			for _, inode := range n.inodes {
				if child, ok := n.bucket.nodes[inode.pgid]; ok {
					child.parent = n
				}
			}

			// Remove old child.
			child.parent = nil
			delete(n.bucket.nodes, child.pgid)
			child.free()
		}

		return
	}

	// If node has no keys then just remove it.
	if n.numChildren() == 0 {
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
		n.parent.rebalance()
		return
	}

	_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")

	// Destination node is right sibling if idx == 0, otherwise left sibling.
	var target *node
	var useNextSibling = (n.parent.childIndex(n) == 0)
	if useNextSibling {
		target = n.nextSibling()
	} else {
		target = n.prevSibling()
	}

	// If both this node and the target node are too small then merge them.
	if useNextSibling {
		// Reparent all child nodes being moved.
		for _, inode := range target.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = n
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes from target and remove target.
		n.inodes = append(n.inodes, target.inodes...)
		n.parent.del(target.key)
		n.parent.removeChild(target)
		delete(n.bucket.nodes, target.pgid)
		target.free()
	} else {
		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent.removeChild(child)
				child.parent = target
				child.parent.children = append(child.parent.children, child)
			}
		}

		// Copy over inodes to target and remove node.
		target.inodes = append(target.inodes, n.inodes...)
		n.parent.del(n.key)
		n.parent.removeChild(n)
		delete(n.bucket.nodes, n.pgid)
		n.free()
	}

	// Either this node or the target node was deleted from the parent so rebalance it.
	n.parent.rebalance()
}

// removeChild removes a node from the list of in-memory children.
// This does not affect the inodes.
func (n *node) removeChild(target *node) {
	for i, child := range n.children {
		if child == target {
			n.children = append(n.children[:i], n.children[i+1:]...)
			return
		}
	}
}

// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
	if n.key != nil {
		key := make([]byte, len(n.key))
		copy(key, n.key)
		n.key = key
		_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
	}

	for i := range n.inodes {
		inode := &n.inodes[i]

		key := make([]byte, len(inode.key))
		copy(key, inode.key)
		inode.key = key
		_assert(len(inode.key) > 0, "dereference: zero-length inode key")

		value := make([]byte, len(inode.value))
		copy(value, inode.value)
		inode.value = value
	}

	// Recursively dereference children.
	for _, child := range n.children {
		child.dereference()
	}

	// Update statistics.
	n.bucket.tx.stats.NodeDeref++
}

// free adds the node's underlying page to the freelist.
func (n *node) free() {
	if n.pgid != 0 {
		n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
		n.pgid = 0
	}
}

// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
	// Write node header.
	var typ = "branch"
	if n.isLeaf {
		typ = "leaf"
	}
	warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))

	// Write out abbreviated version of each item.
	for _, item := range n.inodes {
		if n.isLeaf {
			if item.flags&bucketLeafFlag != 0 {
				bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
				warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
			} else {
				warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
			}
		} else {
			warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
		}
	}
	warn("")
}
*/

type nodes []*node

func (s nodes) Len() int           { return len(s) }
func (s nodes) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }

// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
	flags uint32
	pgid  pgid
	key   []byte
	value []byte
}

type inodes []inode
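A brief aside, not part of the vendored file: node.put above keeps inodes sorted by key. sort.Search finds the insertion index, an exact match is overwritten in place, and otherwise the tail is shifted right by one slot. The same pattern on a []string, with illustrative names:

package main

import (
	"fmt"
	"sort"
)

// insertSorted inserts key into a sorted slice, overwriting an existing
// equal element instead of duplicating it, just as node.put overwrites
// the matching inode's value and flags.
func insertSorted(s []string, key string) []string {
	i := sort.Search(len(s), func(j int) bool { return s[j] >= key })
	if i < len(s) && s[i] == key {
		s[i] = key // exact match: replace in place
		return s
	}
	s = append(s, "")    // grow by one element
	copy(s[i+1:], s[i:]) // shift the tail right
	s[i] = key
	return s
}

func main() {
	s := []string{"a", "c", "d"}
	s = insertSorted(s, "b")
	fmt.Println(s) // prints [a b c d]
}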
197 vendor/github.com/boltdb/bolt/page.go generated vendored Normal file
@@ -0,0 +1,197 @@
package bolt

import (
	"fmt"
	"os"
	"sort"
	"unsafe"
)

const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))

const minKeysPerPage = 2

const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))

const (
	branchPageFlag   = 0x01
	leafPageFlag     = 0x02
	metaPageFlag     = 0x04
	freelistPageFlag = 0x10
)

const (
	bucketLeafFlag = 0x01
)

type pgid uint64

type page struct {
	id       pgid
	flags    uint16
	count    uint16
	overflow uint32
	ptr      uintptr
}

// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
	if (p.flags & branchPageFlag) != 0 {
		return "branch"
	} else if (p.flags & leafPageFlag) != 0 {
		return "leaf"
	} else if (p.flags & metaPageFlag) != 0 {
		return "meta"
	} else if (p.flags & freelistPageFlag) != 0 {
		return "freelist"
	}
	return fmt.Sprintf("unknown<%02x>", p.flags)
}

// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
	return (*meta)(unsafe.Pointer(&p.ptr))
}

// leafPageElement retrieves the leaf node by index.
func (p *page) leafPageElement(index uint16) *leafPageElement {
	n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
	return n
}

// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
	if p.count == 0 {
		return nil
	}
	return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// branchPageElement retrieves the branch node by index.
func (p *page) branchPageElement(index uint16) *branchPageElement {
	return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}

// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
	if p.count == 0 {
		return nil
	}
	return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}

// hexdump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
	fmt.Fprintf(os.Stderr, "%x\n", buf)
}

type pages []*page

func (s pages) Len() int           { return len(s) }
func (s pages) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }

// branchPageElement represents a node on a branch page.
type branchPageElement struct {
	pos   uint32
	ksize uint32
	pgid  pgid
}

// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}

// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
	flags uint32
	pos   uint32
	ksize uint32
	vsize uint32
}

// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}

// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
	buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
	return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}

// PageInfo represents human readable information about a page.
type PageInfo struct {
	ID            int
	Type          string
	Count         int
	OverflowCount int
}

type pgids []pgid

func (s pgids) Len() int           { return len(s) }
func (s pgids) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }

// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
	// Return the opposite slice if one is nil.
	if len(a) == 0 {
		return b
	}
	if len(b) == 0 {
		return a
	}
	merged := make(pgids, len(a)+len(b))
	mergepgids(merged, a, b)
	return merged
}

// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
	if len(dst) < len(a)+len(b) {
		panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
	}
	// Copy in the opposite slice if one is nil.
	if len(a) == 0 {
		copy(dst, b)
		return
	}
	if len(b) == 0 {
		copy(dst, a)
		return
	}

	// Merged will hold all elements from both lists.
	merged := dst[:0]

	// Assign lead to the slice with a lower starting value, follow to the higher value.
	lead, follow := a, b
	if b[0] < a[0] {
		lead, follow = b, a
	}

	// Continue while there are elements in the lead.
	for len(lead) > 0 {
		// Merge largest prefix of lead that is ahead of follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			break
		}

		// Swap lead and follow.
		lead, follow = follow, lead[n:]
	}

	// Append what's left in follow.
	_ = append(merged, follow...)
}
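A brief aside, not part of the vendored file: mergepgids above merges two sorted lists by repeatedly appending the longest prefix of the current "lead" list that sorts before the head of the "follow" list, then swapping the two; sort.Search keeps each step logarithmic rather than element-by-element. A self-contained sketch of the same strategy on plain uint64 slices (names are illustrative):

package main

import (
	"fmt"
	"sort"
)

// mergeSorted returns the sorted union of a and b using the lead/follow
// prefix strategy from mergepgids.
func mergeSorted(a, b []uint64) []uint64 {
	merged := make([]uint64, 0, len(a)+len(b))
	lead, follow := a, b
	if len(b) > 0 && (len(a) == 0 || b[0] < a[0]) {
		lead, follow = b, a
	}
	for len(lead) > 0 {
		if len(follow) == 0 {
			return append(merged, lead...)
		}
		// Take the prefix of lead that sorts at or before follow[0].
		n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
		merged = append(merged, lead[:n]...)
		if n >= len(lead) {
			return append(merged, follow...)
		}
		lead, follow = follow, lead[n:]
	}
	return merged
}

func main() {
	fmt.Println(mergeSorted([]uint64{1, 4, 5}, []uint64{2, 3, 6})) // prints [1 2 3 4 5 6]
}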
686 vendor/github.com/boltdb/bolt/tx.go generated vendored Normal file
@@ -0,0 +1,686 @@
package bolt

import (
	"fmt"
	"io"
	"os"
	"sort"
	"strings"
	"time"
	"unsafe"
)

// txid represents the internal transaction identifier.
type txid uint64

// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages cannot be reclaimed by the writer until no more transactions
// are using them. A long running read transaction can cause the database to
// quickly grow.
type Tx struct {
	writable       bool
	managed        bool
	db             *DB
	meta           *meta
	root           Bucket
	pages          map[pgid]*page
	stats          TxStats
	commitHandlers []func()

	// WriteFlag specifies the flag for write-related methods like WriteTo().
	// Tx opens the database file with the specified flag to copy the data.
	//
	// By default, the flag is unset, which works well for mostly in-memory
	// workloads. For databases that are much larger than available RAM,
	// set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
	WriteFlag int
}

// init initializes the transaction.
func (tx *Tx) init(db *DB) {
	tx.db = db
	tx.pages = nil

	// Copy the meta page since it can be changed by the writer.
	tx.meta = &meta{}
	db.meta().copy(tx.meta)

	// Copy over the root bucket.
	tx.root = newBucket(tx)
	tx.root.bucket = &bucket{}
	*tx.root.bucket = tx.meta.root

	// Increment the transaction id and add a page cache for writable transactions.
	if tx.writable {
		tx.pages = make(map[pgid]*page)
		tx.meta.txid += txid(1)
	}
}

// ID returns the transaction id.
func (tx *Tx) ID() int {
	return int(tx.meta.txid)
}

// DB returns a reference to the database that created the transaction.
func (tx *Tx) DB() *DB {
	return tx.db
}

// Size returns current database size in bytes as seen by this transaction.
func (tx *Tx) Size() int64 {
	return int64(tx.meta.pgid) * int64(tx.db.pageSize)
}

// Writable returns whether the transaction can perform write operations.
func (tx *Tx) Writable() bool {
	return tx.writable
}

// Cursor creates a cursor associated with the root bucket.
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
	return tx.root.Cursor()
}

// Stats retrieves a copy of the current transaction statistics.
func (tx *Tx) Stats() TxStats {
	return tx.stats
}

// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
	return tx.root.Bucket(name)
}

// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
	return tx.root.CreateBucket(name)
}

// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
	return tx.root.CreateBucketIfNotExists(name)
}

// DeleteBucket deletes a bucket.
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
	return tx.root.DeleteBucket(name)
}

// ForEach executes a function for each bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
	return tx.root.ForEach(func(k, v []byte) error {
		if err := fn(k, tx.root.Bucket(k)); err != nil {
			return err
		}
		return nil
	})
}

// OnCommit adds a handler function to be executed after the transaction successfully commits.
func (tx *Tx) OnCommit(fn func()) {
	tx.commitHandlers = append(tx.commitHandlers, fn)
}

// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
	_assert(!tx.managed, "managed tx commit not allowed")
	if tx.db == nil {
		return ErrTxClosed
	} else if !tx.writable {
		return ErrTxNotWritable
	}

	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.

	// Rebalance nodes which have had deletions.
	var startTime = time.Now()
	tx.root.rebalance()
	if tx.stats.Rebalance > 0 {
		tx.stats.RebalanceTime += time.Since(startTime)
	}

	// Spill data onto dirty pages.
	startTime = time.Now()
	if err := tx.root.spill(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.SpillTime += time.Since(startTime)

	// Free the old root bucket.
	tx.meta.root.root = tx.root.root

	opgid := tx.meta.pgid

	// Free the freelist and allocate new pages for it. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	// Write dirty pages to disk.
	startTime = time.Now()
	if err := tx.write(); err != nil {
		tx.rollback()
		return err
	}

	// If strict mode is enabled then perform a consistency check.
	// Only the first consistency error is reported in the panic.
	if tx.db.StrictMode {
		ch := tx.Check()
		var errs []string
		for {
			err, ok := <-ch
			if !ok {
				break
			}
			errs = append(errs, err.Error())
		}
		if len(errs) > 0 {
			panic("check fail: " + strings.Join(errs, "\n"))
		}
	}

	// Write meta to disk.
	if err := tx.writeMeta(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.WriteTime += time.Since(startTime)

	// Finalize the transaction.
	tx.close()

	// Execute commit handlers now that the locks have been removed.
	for _, fn := range tx.commitHandlers {
		fn()
	}

	return nil
}

// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
	_assert(!tx.managed, "managed tx rollback not allowed")
	if tx.db == nil {
		return ErrTxClosed
	}
	tx.rollback()
	return nil
}

func (tx *Tx) rollback() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		tx.db.freelist.rollback(tx.meta.txid)
		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
	}
	tx.close()
}

func (tx *Tx) close() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		// Grab freelist stats.
		var freelistFreeN = tx.db.freelist.free_count()
		var freelistPendingN = tx.db.freelist.pending_count()
		var freelistAlloc = tx.db.freelist.size()

		// Remove transaction ref & writer lock.
		tx.db.rwtx = nil
		tx.db.rwlock.Unlock()

		// Merge statistics.
		tx.db.statlock.Lock()
		tx.db.stats.FreePageN = freelistFreeN
		tx.db.stats.PendingPageN = freelistPendingN
		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
		tx.db.stats.FreelistInuse = freelistAlloc
		tx.db.stats.TxStats.add(&tx.stats)
		tx.db.statlock.Unlock()
	} else {
		tx.db.removeTx(tx)
	}

	// Clear all references.
	tx.db = nil
	tx.meta = nil
	tx.root = Bucket{tx: tx}
	tx.pages = nil
}

// Copy writes the entire database to a writer.
// This function exists for backwards compatibility.
//
// Deprecated: Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
	_, err := tx.WriteTo(w)
	return err
}

// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
	// Attempt to open reader with WriteFlag.
	f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
	if err != nil {
		return 0, err
	}
	defer func() { _ = f.Close() }()

	// Generate a meta page. We use the same page data for both meta pages.
	buf := make([]byte, tx.db.pageSize)
	page := (*page)(unsafe.Pointer(&buf[0]))
	page.flags = metaPageFlag
	*page.meta() = *tx.meta

	// Write meta 0.
	page.id = 0
	page.meta().checksum = page.meta().sum64()
	nn, err := w.Write(buf)
	n += int64(nn)
	if err != nil {
		return n, fmt.Errorf("meta 0 copy: %s", err)
	}

	// Write meta 1 with a lower transaction id.
	page.id = 1
	page.meta().txid -= 1
	page.meta().checksum = page.meta().sum64()
	nn, err = w.Write(buf)
	n += int64(nn)
	if err != nil {
		return n, fmt.Errorf("meta 1 copy: %s", err)
	}

	// Move past the meta pages in the file.
	if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
		return n, fmt.Errorf("seek: %s", err)
	}

	// Copy data pages.
	wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
	n += wn
	if err != nil {
		return n, err
	}

	return n, f.Close()
}

// CopyFile copies the entire database to file at the given path.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
	if err != nil {
		return err
	}

	err = tx.Copy(f)
	if err != nil {
		_ = f.Close()
		return err
	}
	return f.Close()
}
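A brief aside, not part of the vendored file: because CopyFile above runs under the calling transaction, a consistent hot backup falls out of wrapping it in a read transaction. A typical caller-side sketch using bolt's public API; the file names are placeholders:

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

// backup snapshots a live database. Running inside db.View means writers
// keep making progress while the consistent snapshot is written out.
func backup(db *bolt.DB, dest string) error {
	return db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile(dest, 0600)
	})
}

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := backup(db, "example-backup.db"); err != nil {
		log.Fatal(err)
	}
}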
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
	ch := make(chan error)
	go tx.check(ch)
	return ch
}

func (tx *Tx) check(ch chan error) {
	// Check if any pages are double freed.
	freed := make(map[pgid]bool)
	all := make([]pgid, tx.db.freelist.count())
	tx.db.freelist.copyall(all)
	for _, id := range all {
		if freed[id] {
			ch <- fmt.Errorf("page %d: already freed", id)
		}
		freed[id] = true
	}

	// Track every reachable page.
	reachable := make(map[pgid]*page)
	reachable[0] = tx.page(0) // meta0
	reachable[1] = tx.page(1) // meta1
	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
	}

	// Recursively check buckets.
	tx.checkBucket(&tx.root, reachable, freed, ch)

	// Ensure all pages below high water mark are either reachable or freed.
	for i := pgid(0); i < tx.meta.pgid; i++ {
		_, isReachable := reachable[i]
		if !isReachable && !freed[i] {
			ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
		}
	}

	// Close the channel to signal completion.
	close(ch)
}

func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
	// Ignore inline buckets.
	if b.root == 0 {
		return
	}

	// Check every page used by this bucket.
	b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
		if p.id > tx.meta.pgid {
			ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
		}

		// Ensure each page is only referenced once.
		for i := pgid(0); i <= pgid(p.overflow); i++ {
			var id = p.id + i
			if _, ok := reachable[id]; ok {
				ch <- fmt.Errorf("page %d: multiple references", int(id))
			}
			reachable[id] = p
		}

		// We should only encounter un-freed leaf and branch pages.
		if freed[p.id] {
			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
			ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
		}
	})

	// Check each bucket within this bucket.
	_ = b.ForEach(func(k, v []byte) error {
		if child := b.Bucket(k); child != nil {
			tx.checkBucket(child, reachable, freed, ch)
		}
		return nil
	})
}

// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
	p, err := tx.db.allocate(count)
	if err != nil {
		return nil, err
	}

	// Save to our page cache.
	tx.pages[p.id] = p

	// Update statistics.
	tx.stats.PageCount++
	tx.stats.PageAlloc += count * tx.db.pageSize

	return p, nil
}

// write writes any dirty pages to disk.
func (tx *Tx) write() error {
	// Sort pages by id.
	pages := make(pages, 0, len(tx.pages))
	for _, p := range tx.pages {
		pages = append(pages, p)
	}
	// Clear out page cache early.
	tx.pages = make(map[pgid]*page)
	sort.Sort(pages)

	// Write pages to disk in order.
	for _, p := range pages {
		size := (int(p.overflow) + 1) * tx.db.pageSize
		offset := int64(p.id) * int64(tx.db.pageSize)

		// Write out page in "max allocation" sized chunks.
		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
		for {
			// Limit our write to our max allocation size.
			sz := size
			if sz > maxAllocSize-1 {
				sz = maxAllocSize - 1
			}

			// Write chunk to disk.
			buf := ptr[:sz]
			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
				return err
			}

			// Update statistics.
			tx.stats.Write++

			// Exit inner for loop if we've written all the chunks.
			size -= sz
			if size == 0 {
				break
			}

			// Otherwise move offset forward and move pointer to next chunk.
			offset += int64(sz)
			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
		}
	}

	// Ignore file sync if flag is set on DB.
	if !tx.db.NoSync || IgnoreNoSync {
		if err := fdatasync(tx.db); err != nil {
			return err
		}
	}

	// Put small pages back to page pool.
	for _, p := range pages {
		// Ignore page sizes over 1 page.
		// These are allocated using make() instead of the page pool.
		if int(p.overflow) != 0 {
			continue
		}

		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]

		// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
		for i := range buf {
			buf[i] = 0
		}
		tx.db.pagePool.Put(buf)
	}

	return nil
}

// writeMeta writes the meta to the disk.
func (tx *Tx) writeMeta() error {
	// Create a temporary buffer for the meta page.
	buf := make([]byte, tx.db.pageSize)
	p := tx.db.pageInBuffer(buf, 0)
	tx.meta.write(p)

	// Write the meta page to file.
	if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
		return err
	}
	if !tx.db.NoSync || IgnoreNoSync {
		if err := fdatasync(tx.db); err != nil {
			return err
		}
	}

	// Update statistics.
	tx.stats.Write++

	return nil
}

// page returns a reference to the page with a given id.
// If page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
	// Check the dirty pages first.
	if tx.pages != nil {
		if p, ok := tx.pages[id]; ok {
			return p
		}
	}

	// Otherwise return directly from the mmap.
	return tx.db.page(id)
}

// forEachPage iterates over every page within a given page and executes a function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
	p := tx.page(pgid)

	// Execute function.
	fn(p, depth)

	// Recursively loop over children.
	if (p.flags & branchPageFlag) != 0 {
		for i := 0; i < int(p.count); i++ {
			elem := p.branchPageElement(uint16(i))
			tx.forEachPage(elem.pgid, depth+1, fn)
		}
	}
}

// Page returns page information for a given page number.
// This is only safe for concurrent use when used by a writable transaction.
func (tx *Tx) Page(id int) (*PageInfo, error) {
	if tx.db == nil {
		return nil, ErrTxClosed
	} else if pgid(id) >= tx.meta.pgid {
		return nil, nil
	}

	// Build the page info.
	p := tx.db.page(pgid(id))
	info := &PageInfo{
		ID:            id,
		Count:         int(p.count),
		OverflowCount: int(p.overflow),
	}

	// Determine the type (or if it's free).
	if tx.db.freelist.freed(pgid(id)) {
		info.Type = "free"
	} else {
		info.Type = p.typ()
	}

	return info, nil
}

// TxStats represents statistics about the actions performed by the transaction.
type TxStats struct {
	// Page statistics.
	PageCount int // number of page allocations
	PageAlloc int // total bytes allocated

	// Cursor statistics.
	CursorCount int // number of cursors created

	// Node statistics.
	NodeCount int // number of node allocations
	NodeDeref int // number of node dereferences

	// Rebalance statistics.
	Rebalance     int           // number of node rebalances
	RebalanceTime time.Duration // total time spent rebalancing

	// Split/Spill statistics.
	Split     int           // number of nodes split
	Spill     int           // number of nodes spilled
	SpillTime time.Duration // total time spent spilling

	// Write statistics.
	Write     int           // number of writes performed
	WriteTime time.Duration // total time spent writing to disk
}

func (s *TxStats) add(other *TxStats) {
	s.PageCount += other.PageCount
	s.PageAlloc += other.PageAlloc
	s.CursorCount += other.CursorCount
	s.NodeCount += other.NodeCount
	s.NodeDeref += other.NodeDeref
	s.Rebalance += other.Rebalance
	s.RebalanceTime += other.RebalanceTime
	s.Split += other.Split
	s.Spill += other.Spill
	s.SpillTime += other.SpillTime
	s.Write += other.Write
	s.WriteTime += other.WriteTime
}

// Sub calculates and returns the difference between two sets of transaction stats.
// This is useful when obtaining stats at two different points in time and
// you need the performance counters that occurred within that time span.
func (s *TxStats) Sub(other *TxStats) TxStats {
	var diff TxStats
	diff.PageCount = s.PageCount - other.PageCount
	diff.PageAlloc = s.PageAlloc - other.PageAlloc
	diff.CursorCount = s.CursorCount - other.CursorCount
	diff.NodeCount = s.NodeCount - other.NodeCount
	diff.NodeDeref = s.NodeDeref - other.NodeDeref
	diff.Rebalance = s.Rebalance - other.Rebalance
	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
	diff.Split = s.Split - other.Split
	diff.Spill = s.Spill - other.Spill
	diff.SpillTime = s.SpillTime - other.SpillTime
	diff.Write = s.Write - other.Write
	diff.WriteTime = s.WriteTime - other.WriteTime
	return diff
}
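A brief aside, not part of the vendored file: the Tx doc comment above insists on committing or rolling back promptly, since the writer cannot reclaim pages pinned by open transactions. In practice most callers never touch Commit/Rollback directly: DB.Update and DB.View run a function inside a managed transaction and handle both paths. A usage sketch against bolt's public API; the bucket and key names are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update runs fn in a managed read/write Tx: it commits when fn
	// returns nil and rolls back when fn returns an error, so pages
	// are released promptly either way.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// View runs fn in a managed read-only Tx.
	_ = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("answer=%s\n", tx.Bucket([]byte("widgets")).Get([]byte("answer")))
		return nil
	})
}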
101 vendor/github.com/containers/image/copy/copy.go generated vendored
@@ -13,6 +13,7 @@ import (
"time"
|
||||
|
||||
"github.com/containers/image/image"
|
||||
"github.com/containers/image/pkg/blobinfocache"
|
||||
"github.com/containers/image/pkg/compression"
|
||||
"github.com/containers/image/signature"
|
||||
"github.com/containers/image/transports"
|
||||
@ -24,14 +25,16 @@ import (
|
||||
)
|
||||
|
||||
type digestingReader struct {
|
||||
source io.Reader
|
||||
digester digest.Digester
|
||||
expectedDigest digest.Digest
|
||||
validationFailed bool
|
||||
source io.Reader
|
||||
digester digest.Digester
|
||||
expectedDigest digest.Digest
|
||||
validationFailed bool
|
||||
validationSucceeded bool
|
||||
}
|
||||
|
||||
// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
|
||||
// and set validationFailed to true if the source stream does not match expectedDigest.
|
||||
// or set validationSucceeded/validationFailed to true if the source stream does/does not match expectedDigest.
|
||||
// (neither is set if EOF is never reached).
|
||||
func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
|
||||
if err := expectedDigest.Validate(); err != nil {
|
||||
return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
|
||||
@ -64,6 +67,7 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
d.validationFailed = true
|
||||
return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
|
||||
}
|
||||
d.validationSucceeded = true
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
@ -71,21 +75,22 @@ func (d *digestingReader) Read(p []byte) (int, error) {
|
||||
// copier allows us to keep track of diffID values for blobs, and other
|
||||
// data shared across one or more images in a possible manifest list.
|
||||
type copier struct {
|
||||
cachedDiffIDs map[digest.Digest]digest.Digest
|
||||
dest types.ImageDestination
|
||||
rawSource types.ImageSource
|
||||
reportWriter io.Writer
|
||||
progressInterval time.Duration
|
||||
progress chan types.ProgressProperties
|
||||
blobInfoCache types.BlobInfoCache
|
||||
}
|
||||
|
||||
// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
|
||||
type imageCopier struct {
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src types.Image
|
||||
diffIDsAreNeeded bool
|
||||
canModifyManifest bool
|
||||
c *copier
|
||||
manifestUpdates *types.ManifestUpdateOptions
|
||||
src types.Image
|
||||
diffIDsAreNeeded bool
|
||||
canModifyManifest bool
|
||||
canSubstituteBlobs bool
|
||||
}
|
||||
|
||||
// Options allows supplying non-default configuration modifying the behavior of CopyImage.
|
||||
@ -141,12 +146,15 @@ func Image(ctx context.Context, policyContext *signature.PolicyContext, destRef,
|
||||
}()
|
||||
|
||||
c := &copier{
|
||||
cachedDiffIDs: make(map[digest.Digest]digest.Digest),
|
||||
dest: dest,
|
||||
rawSource: rawSource,
|
||||
reportWriter: reportWriter,
|
||||
progressInterval: options.ProgressInterval,
|
||||
progress: options.Progress,
|
||||
// FIXME? The cache is used for sources and destinations equally, but we only have a SourceCtx and DestinationCtx.
|
||||
// For now, use DestinationCtx (because blob reuse changes the behavior of the destination side more); eventually
|
||||
// we might want to add a separate CommonCtx — or would that be too confusing?
|
||||
blobInfoCache: blobinfocache.DefaultCache(options.DestinationCtx),
|
||||
}
|
||||
|
||||
unparsedToplevel := image.UnparsedInstance(rawSource, nil)
|
||||
@ -235,6 +243,13 @@ func (c *copier) copyOneImage(ctx context.Context, policyContext *signature.Poli
|
||||
src: src,
|
||||
// diffIDsAreNeeded is computed later
|
||||
canModifyManifest: len(sigs) == 0,
|
||||
// Ensure _this_ copy sees exactly the intended data when either processing a signed image or signing it.
|
||||
// This may be too conservative, but for now, better safe than sorry, _especially_ on the SignBy path:
|
||||
// The signature makes the content non-repudiable, so it very much matters that the signature is made over exactly what the user intended.
|
||||
// We do intend the RecordDigestUncompressedPair calls to only work with reliable data, but at least there’s a risk
|
||||
// that the compressed version coming from a third party may be designed to attack some other decompressor implementation,
|
||||
// and we would reuse and sign it.
|
||||
canSubstituteBlobs: len(sigs) == 0 && options.SignBy == "",
|
||||
}
|
||||
|
||||
if err := ic.updateEmbeddedDockerReference(); err != nil {
|
||||
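A brief aside, not part of the diff: the hunks above replace the copier's per-run cachedDiffIDs map with a types.BlobInfoCache, whose default implementation this commit backs with BoltDB. As a mental model only, the two methods copyLayer uses behave like a map from a compressed digest to its uncompressed digest; this toy is not the real c/image implementation:

package main

import "fmt"

// memoryCache is a toy stand-in for the UncompressedDigest /
// RecordDigestUncompressedPair portion of types.BlobInfoCache.
// The real DefaultCache persists these pairs in a BoltDB file so they
// survive across copies and processes.
type memoryCache struct {
	uncompressed map[string]string // compressed digest -> uncompressed digest
}

func (m *memoryCache) RecordDigestUncompressedPair(anyDigest, uncompressed string) {
	m.uncompressed[anyDigest] = uncompressed
}

func (m *memoryCache) UncompressedDigest(anyDigest string) string {
	return m.uncompressed[anyDigest] // "" when unknown, as copyLayer expects
}

func main() {
	c := &memoryCache{uncompressed: map[string]string{}}
	c.RecordDigestUncompressedPair("sha256:compressed", "sha256:diffid")
	fmt.Println(c.UncompressedDigest("sha256:compressed"))
}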
@@ -498,32 +513,24 @@
 // copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
 // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
 func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
-	// Check if we already have a blob with this digest
-	haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(ctx, srcInfo)
-	if err != nil {
-		return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
-	}
-	// If we already have a cached diffID for this blob, we don't need to compute it
-	diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "")
-	// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
-	if haveBlob && !diffIDIsNeeded {
-		// Check the blob sizes match, if we were given a size this time
-		if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
-			return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
-		}
-		srcInfo.Size = extantBlobSize
-		// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
-		blobinfo, err := ic.c.dest.ReapplyBlob(ctx, srcInfo)
+	cachedDiffID := ic.c.blobInfoCache.UncompressedDigest(srcInfo.Digest) // May be ""
+	diffIDIsNeeded := ic.diffIDsAreNeeded && cachedDiffID == ""
+
+	// If we already have the blob, and we don't need to compute the diffID, then we don't need to read it from the source.
+	if !diffIDIsNeeded {
+		reused, blobInfo, err := ic.c.dest.TryReusingBlob(ctx, srcInfo, ic.c.blobInfoCache, ic.canSubstituteBlobs)
 		if err != nil {
-			return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+			return types.BlobInfo{}, "", errors.Wrapf(err, "Error trying to reuse blob %s at destination", srcInfo.Digest)
 		}
-		ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
-		return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err
+		if reused {
+			ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+			return blobInfo, cachedDiffID, nil
+		}
 	}
 
 	// Fallback: copy the layer, computing the diffID if we need to do so
 	ic.c.Printf("Copying blob %s\n", srcInfo.Digest)
-	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo)
+	srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(ctx, srcInfo, ic.c.blobInfoCache)
 	if err != nil {
 		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
 	}
@@ -543,11 +550,13 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo) (t
 			return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
 		}
 		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
-		ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+		// This is safe because we have just computed diffIDResult.Digest ourselves, and in the process
+		// we have read all of the input blob, so srcInfo.Digest must have been validated by digestingReader.
+		ic.c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, diffIDResult.digest)
 		return blobInfo, diffIDResult.digest, nil
 	}
 	} else {
-		return blobInfo, ic.c.cachedDiffIDs[srcInfo.Digest], nil
+		return blobInfo, cachedDiffID, nil
 	}
 }
 
@@ -624,7 +633,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
 	// === Process input through digestingReader to validate against the expected digest.
 	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
 	// use a separate validation failure indicator.
+	// Note that we don't use a stronger "validationSucceeded" indicator, because
|
||||
// Note that for this check we don't use the stronger "validationSucceeded" indicator, because
|
||||
// dest.PutBlob may detect that the layer already exists, in which case we don't
|
||||
// read stream to the end, and validation does not happen.
|
||||
digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
|
||||
@ -660,8 +669,10 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
|
||||
// === Deal with layer compression/decompression if necessary
|
||||
var inputInfo types.BlobInfo
|
||||
var compressionOperation types.LayerCompression
|
||||
if canModifyBlob && c.dest.DesiredLayerCompression() == types.Compress && !isCompressed {
|
||||
logrus.Debugf("Compressing blob on the fly")
|
||||
compressionOperation = types.Compress
|
||||
pipeReader, pipeWriter := io.Pipe()
|
||||
defer pipeReader.Close()
|
||||
|
||||
@ -674,6 +685,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
inputInfo.Size = -1
|
||||
} else if canModifyBlob && c.dest.DesiredLayerCompression() == types.Decompress && isCompressed {
|
||||
logrus.Debugf("Blob will be decompressed")
|
||||
compressionOperation = types.Decompress
|
||||
s, err := decompressor(destStream)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, err
|
||||
@ -684,6 +696,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
inputInfo.Size = -1
|
||||
} else {
|
||||
logrus.Debugf("Using original blob without modification")
|
||||
compressionOperation = types.PreserveOriginal
|
||||
inputInfo = srcInfo
|
||||
}
|
||||
|
||||
@ -699,7 +712,7 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
}
|
||||
|
||||
// === Finally, send the layer stream to dest.
|
||||
uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, isConfig)
|
||||
uploadedInfo, err := c.dest.PutBlob(ctx, destStream, inputInfo, c.blobInfoCache, isConfig)
|
||||
if err != nil {
|
||||
return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
|
||||
}
|
||||
@ -722,6 +735,22 @@ func (c *copier) copyBlobFromStream(ctx context.Context, srcStream io.Reader, sr
|
||||
if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
|
||||
return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
|
||||
}
|
||||
if digestingReader.validationSucceeded {
|
||||
// If compressionOperation != types.PreserveOriginal, we now have two reliable digest values:
|
||||
// srcinfo.Digest describes the pre-compressionOperation input, verified by digestingReader
|
||||
// uploadedInfo.Digest describes the post-compressionOperation output, computed by PutBlob
|
||||
// (because inputInfo.Digest == "", this must have been computed afresh).
|
||||
switch compressionOperation {
|
||||
case types.PreserveOriginal:
|
||||
break // Do nothing, we have only one digest and we might not have even verified it.
|
||||
case types.Compress:
|
||||
c.blobInfoCache.RecordDigestUncompressedPair(uploadedInfo.Digest, srcInfo.Digest)
|
||||
case types.Decompress:
|
||||
c.blobInfoCache.RecordDigestUncompressedPair(srcInfo.Digest, uploadedInfo.Digest)
|
||||
default:
|
||||
return types.BlobInfo{}, errors.Errorf("Internal error: Unexpected compressionOperation value %#v", compressionOperation)
|
||||
}
|
||||
}
|
||||
return uploadedInfo, nil
|
||||
}
|
||||
|
||||
|
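The direction of the pair recorded by the switch above is easy to get backwards, so here is a minimal sketch of the invariant it maintains, assuming only the cache method already used in this diff; the helper name recordPair and both digest arguments are invented for illustration. RecordDigestUncompressedPair always takes the (possibly compressed) digest first and its uncompressed counterpart second, whichever direction the copy transformed the blob.

func recordPair(cache types.BlobInfoCache, op types.LayerCompression, srcDigest, uploadedDigest digest.Digest) {
    switch op {
    case types.Compress:
        // We read the uncompressed blob (srcDigest) and uploaded its compressed form.
        cache.RecordDigestUncompressedPair(uploadedDigest, srcDigest)
    case types.Decompress:
        // We read a compressed blob (srcDigest) and uploaded its uncompressed form.
        cache.RecordDigestUncompressedPair(srcDigest, uploadedDigest)
    case types.PreserveOriginal:
        // Only one digest is reliable here, so there is nothing trustworthy to record.
    }
}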
27 vendor/github.com/containers/image/directory/directory_dest.go generated vendored
@ -127,10 +127,11 @@ func (d *dirImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.path, "dir-put-blob")
if err != nil {
return types.BlobInfo{}, err
@ -169,27 +170,27 @@ func (d *dirImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dirImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dirImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
blobPath := d.ref.layerPath(info.Digest)
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, -1, nil
return false, types.BlobInfo{}, nil
}
if err != nil {
return false, -1, err
return false, types.BlobInfo{}, err
}
return true, finfo.Size(), nil
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}

func (d *dirImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
}
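A minimal caller-side sketch of this API change, with invented names (dest, srcInfo, useBlob): the old HasBlob + ReapplyBlob pair collapses into a single TryReusingBlob call.

reused, info, err := dest.TryReusingBlob(ctx, srcInfo, cache, false)
if err != nil {
    return err
}
if reused {
    // Equivalent of the old HasBlob(...) returning true followed by ReapplyBlob(...);
    // info carries at least the digest and size of the reused blob.
    return useBlob(info)
}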
// PutManifest writes manifest to the destination.
4 vendor/github.com/containers/image/directory/directory_src.go generated vendored
@ -49,7 +49,9 @@ func (s *dirImageSource) GetManifest(ctx context.Context, instanceDigest *digest
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dirImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
r, err := os.Open(s.ref.layerPath(info.Digest))
if err != nil {
return nil, -1, err
23 vendor/github.com/containers/image/docker/cache.go generated vendored Normal file
@ -0,0 +1,23 @@
package docker

import (
"github.com/containers/image/docker/reference"
"github.com/containers/image/types"
)

// bicTransportScope returns a BICTransportScope appropriate for ref.
func bicTransportScope(ref dockerReference) types.BICTransportScope {
// Blobs can be reused across the whole registry.
return types.BICTransportScope{Opaque: reference.Domain(ref.ref)}
}

// newBICLocationReference returns a BICLocationReference appropriate for ref.
func newBICLocationReference(ref dockerReference) types.BICLocationReference {
// Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob).
return types.BICLocationReference{Opaque: ref.ref.Name()}
}

// parseBICLocationReference returns a repository for encoded lr.
func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) {
return reference.ParseNormalizedNamed(lr.Opaque)
}
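To make the division of labor concrete, a hypothetical walkthrough for a reference like docker.io/library/busybox:latest (the resulting values are inferred from the functions above, not observed output):

scope := bicTransportScope(ref)     // types.BICTransportScope{Opaque: "docker.io"}: blobs are reusable registry-wide
loc := newBICLocationReference(ref) // types.BICLocationReference{Opaque: "docker.io/library/busybox"}: locations are per-repository
repo, err := parseBICLocationReference(loc) // round-trips back to a reference.Named usable for mounting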
59 vendor/github.com/containers/image/docker/docker_client.go generated vendored
@ -70,10 +70,11 @@ type extensionSignatureList struct {
}

type bearerToken struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
expirationTime time.Time
}

// dockerClient is configuration for dealing with a single Docker registry.
@ -88,14 +89,14 @@ type dockerClient struct {
password string
signatureBase signatureStorageBase
scope authScope
extraScope *authScope // If non-nil, a temporary extra token scope (necessary for mounting from another repo)
// The following members are detected registry properties:
// They are set after a successful detectProperties(), and never change afterwards.
scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
challenges []challenge
supportsSignatures bool
// The following members are private state for setupRequestAuth, both are valid if token != nil.
token *bearerToken
tokenExpiration time.Time
// Private state for setupRequestAuth
tokenCache map[string]bearerToken
}

type authScope struct {
@ -131,6 +132,7 @@ func newBearerTokenFromJSONBlob(blob []byte) (*bearerToken, error) {
if token.IssuedAt.IsZero() {
token.IssuedAt = time.Now().UTC()
}
token.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
return token, nil
}

@ -260,6 +262,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc
registry: registry,
client: &http.Client{Transport: tr},
insecureSkipTLSVerify: skipVerify,
tokenCache: map[string]bearerToken{},
}, nil
}

@ -463,24 +466,23 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
req.SetBasicAuth(c.username, c.password)
return nil
case "bearer":
if c.token == nil || time.Now().After(c.tokenExpiration) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return errors.Errorf("missing realm in bearer auth challenge")
}
service, _ := challenge.Parameters["service"] // Will be "" if not present
var scope string
if c.scope.remoteName != "" && c.scope.actions != "" {
scope = fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
}
token, err := c.getBearerToken(req.Context(), realm, service, scope)
cacheKey := ""
scopes := []authScope{c.scope}
if c.extraScope != nil {
// Using ':' as a separator here is unambiguous because getBearerToken below uses the same separator when formatting a remote request (and because repository names can't contain colons).
cacheKey = fmt.Sprintf("%s:%s", c.extraScope.remoteName, c.extraScope.actions)
scopes = append(scopes, *c.extraScope)
}
token, ok := c.tokenCache[cacheKey]
if !ok || time.Now().After(token.expirationTime) {
t, err := c.getBearerToken(req.Context(), challenge, scopes)
if err != nil {
return err
}
c.token = token
c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
token = *t
c.tokenCache[cacheKey] = token
}
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token.Token))
return nil
default:
logrus.Debugf("no handler for %s authentication", challenge.Scheme)
@ -490,7 +492,12 @@ func (c *dockerClient) setupRequestAuth(req *http.Request) error {
return nil
}

func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, scopes []authScope) (*bearerToken, error) {
realm, ok := challenge.Parameters["realm"]
if !ok {
return nil, errors.Errorf("missing realm in bearer auth challenge")
}

authReq, err := http.NewRequest("GET", realm, nil)
if err != nil {
return nil, err
@ -500,11 +507,13 @@ func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope
if c.username != "" {
getParams.Add("account", c.username)
}
if service != "" {
if service, ok := challenge.Parameters["service"]; ok && service != "" {
getParams.Add("service", service)
}
if scope != "" {
getParams.Add("scope", scope)
for _, scope := range scopes {
if scope.remoteName != "" && scope.actions != "" {
getParams.Add("scope", fmt.Sprintf("repository:%s:%s", scope.remoteName, scope.actions))
}
}
authReq.URL.RawQuery = getParams.Encode()
if c.username != "" && c.password != "" {
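To make the multi-scope behavior concrete, here is roughly what the token request carries when an extraScope for a mount candidate is present; the registry, repositories, and actions below are invented:

getParams := url.Values{}
getParams.Add("service", "registry.example.com")
getParams.Add("scope", "repository:team/app:pull,push") // c.scope: the destination repository
getParams.Add("scope", "repository:team/base:pull")     // c.extraScope: the mount source
// getParams.Encode() emits both scope parameters, so one bearer token covers
// the push to team/app and the pull from team/base.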
159 vendor/github.com/containers/image/docker/docker_image_dest.go generated vendored
@ -15,6 +15,7 @@ import (

"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/docker/distribution/registry/api/errcode"
"github.com/docker/distribution/registry/api/v2"
@ -113,17 +114,21 @@ func (c *sizeCounter) Write(p []byte) (n int, err error) {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
if inputInfo.Digest.String() != "" {
haveBlob, size, err := d.HasBlob(ctx, inputInfo)
// This should not really be necessary, at least the copy code calls TryReusingBlob automatically.
// Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value.
// But we do that with NoCache, so that it _only_ checks the primary destination, instead of trying all mount candidates _again_.
haveBlob, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, blobinfocache.NoCache, false)
if err != nil {
return types.BlobInfo{}, err
}
if haveBlob {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
return reusedInfo, nil
}
}

@ -160,7 +165,7 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
}

// FIXME: DELETE uploadLocation on failure
// FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope)

locationQuery := uploadLocation.Query()
// TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
@ -177,19 +182,15 @@ func (d *dockerImageDestination) PutBlob(ctx context.Context, stream io.Reader,
}

logrus.Debugf("Upload of layer %s complete", computedDigest)
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), computedDigest, newBICLocationReference(d.ref))
return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// blobExists returns true iff repo contains a blob with digest, and if so, also its size.
// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
}
checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())

func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest) (bool, int64, error) {
checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String())
logrus.Debugf("Checking %s", checkPath)
res, err := d.c.makeRequest(ctx, "HEAD", checkPath, nil, nil, v2Auth)
if err != nil {
@ -202,7 +203,7 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
return true, getBlobSize(res), nil
case http.StatusUnauthorized:
logrus.Debugf("... not authorized")
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", info.Digest, d.ref.ref.Name())
return false, -1, errors.Wrapf(client.HandleErrorResponse(res), "Error checking whether a blob %s exists in %s", digest, repo.Name())
case http.StatusNotFound:
logrus.Debugf("... not present")
return false, -1, nil
@ -211,8 +212,134 @@ func (d *dockerImageDestination) HasBlob(ctx context.Context, info types.BlobInf
}
}

func (d *dockerImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
// mountBlob tries to mount blob srcDigest from srcRepo to the current destination.
func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest) error {
u := url.URL{
Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)),
RawQuery: url.Values{
"mount": {srcDigest.String()},
"from": {reference.Path(srcRepo)},
}.Encode(),
}
mountPath := u.String()
logrus.Debugf("Trying to mount %s", mountPath)
res, err := d.c.makeRequest(ctx, "POST", mountPath, nil, nil, v2Auth)
if err != nil {
return err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusCreated:
logrus.Debugf("... mount OK")
return nil
case http.StatusAccepted:
// Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process.
// Abort, and let the ultimate caller do an upload when it's ready, instead.
// NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested.
uploadLocation, err := res.Location()
if err != nil {
return errors.Wrap(err, "Error determining upload URL after a mount attempt")
}
logrus.Debugf("... started an upload instead of mounting, trying to cancel at %s", uploadLocation.String())
res2, err := d.c.makeRequestToResolvedURL(ctx, "DELETE", uploadLocation.String(), nil, nil, -1, v2Auth)
if err != nil {
logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err)
} else {
defer res2.Body.Close()
if res2.StatusCode != http.StatusNoContent {
logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode))
}
}
// Anyway, if canceling the upload fails, ignore it and return the more important error:
return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name())
default:
logrus.Debugf("Error mounting, response %#v", *res)
return errors.Wrapf(client.HandleErrorResponse(res), "Error mounting %s from %s to %s", srcDigest, srcRepo.Name(), d.ref.ref.Name())
}
}
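For reference, mountBlob issues the registry API's standard cross-repository mount request; a hypothetical instance, with invented repository names and digest:

u := url.URL{
    Path: "/v2/team/app/blobs/uploads/", // fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
    RawQuery: url.Values{
        "mount": {"sha256:0a1b2c..."},
        "from":  {"team/base"},
    }.Encode(),
}
// POST u.String() then branches on the status code: 201 Created means the registry
// linked the existing blob; 202 Accepted means it started an ordinary upload
// session instead, which mountBlob cancels as shown above.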
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *dockerImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}

// First, check whether the blob happens to already exist at the destination.
exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest)
if err != nil {
return false, types.BlobInfo{}, err
}
if exists {
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}

// Then try reusing blobs from other locations.

// Checking candidateRepo, and mounting from it, requires an expanded token scope.
// We still want to reuse the ping information and other aspects of the client, so rather than make a fresh copy, there is this somewhat ugly extraScope hack.
if d.c.extraScope != nil {
return false, types.BlobInfo{}, errors.New("Internal error: dockerClient.extraScope was set before TryReusingBlob")
}
defer func() {
d.c.extraScope = nil
}()
for _, candidate := range cache.CandidateLocations(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, canSubstitute) {
candidateRepo, err := parseBICLocationReference(candidate.Location)
if err != nil {
logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
continue
}
logrus.Debugf("Trying to reuse cached location %s in %s", candidate.Digest.String(), candidateRepo.Name())

// Sanity checks:
if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
continue
}
if candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
logrus.Debug("... Already tried the primary destination")
continue
}

// Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
d.c.extraScope = &authScope{
remoteName: reference.Path(candidateRepo),
actions: "pull",
}
// This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
// But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel.
// So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure.
// On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly.
// Even worse, docker/distribution does not actually reasonably implement canceling uploads
// (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask);
// so, be a nice client and don't create unnecessary upload sessions on the server.
exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest)
if err != nil {
logrus.Debugf("... Failed: %v", err)
continue
}
if !exists {
// FIXME? Should we drop the blob from cache here (and elsewhere?)?
continue // logrus.Debug() already happened in blobExists
}
if candidateRepo.Name() != d.ref.ref.Name() {
if err := d.mountBlob(ctx, candidateRepo, candidate.Digest); err != nil {
logrus.Debugf("... Mount failed: %v", err)
continue
}
}
cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref))
return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
}

return false, types.BlobInfo{}, nil
}
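Putting the pieces together, a hypothetical second push shows the intended effect (repository names invented): after one copy to registry.example.com/team/app has populated the cache, copying the same layers to registry.example.com/team/app2 finds a candidate and mounts it server-side instead of re-uploading.

candidates := cache.CandidateLocations(transport, scope, layerDigest, canSubstitute)
// candidates now includes a BICReplacementCandidate whose Location decodes to
// "registry.example.com/team/app"; TryReusingBlob verifies it with blobExists and
// links it into team/app2 with a single mount request, so the layer bytes never
// cross the wire again.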
// PutManifest writes manifest to the destination.
5 vendor/github.com/containers/image/docker/docker_image_src.go generated vendored
@ -162,7 +162,9 @@ func getBlobSize(resp *http.Response) int64 {
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
@ -177,6 +179,7 @@ func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (i
// print url also
return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d (%s)", res.StatusCode, http.StatusText(res.StatusCode))
}
cache.RecordKnownLocation(s.ref.Transport(), bicTransportScope(s.ref), info.Digest, newBICLocationReference(s.ref))
return res.Body, getBlobSize(res), nil
}
37 vendor/github.com/containers/image/docker/tarfile/dest.go generated vendored
@ -85,10 +85,11 @@ func (d *Destination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
// Ouch, we need to stream the blob into a temporary file just to determine the size.
// When the layer is decompressed, we also have to generate the digest on uncompressed data.
if inputInfo.Size == -1 || inputInfo.Digest.String() == "" {
@ -120,12 +121,12 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
}

// Maybe the blob has already been sent
ok, size, err := d.HasBlob(ctx, inputInfo)
ok, reusedInfo, err := d.TryReusingBlob(ctx, inputInfo, cache, false)
if err != nil {
return types.BlobInfo{}, err
}
if ok {
return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
return reusedInfo, nil
}

if isConfig {
@ -151,29 +152,21 @@ func (d *Destination) PutBlob(ctx context.Context, stream io.Reader, inputInfo t
return types.BlobInfo{Digest: inputInfo.Digest, Size: inputInfo.Size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with
// the matching digest which can be reapplied using ReapplyBlob. Unlike
// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
// the blob must also be returned. If the destination does not contain the
// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
// returns a non-nil error only on an unexpected failure.
func (d *Destination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *Destination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
return false, types.BlobInfo{}, errors.Errorf("Can not check for a blob with unknown digest")
}
if blob, ok := d.blobs[info.Digest]; ok {
return true, blob.Size, nil
return true, types.BlobInfo{Digest: info.Digest, Size: blob.Size}, nil
}
return false, -1, nil
}

// ReapplyBlob informs the image destination that a blob for which HasBlob
// previously returned true would have been passed to PutBlob if it had
// returned false. Like HasBlob and unlike PutBlob, the digest can not be
// empty. If the blob is a filesystem layer, this signifies that the changes
// it describes need to be applied again when composing a filesystem tree.
func (d *Destination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
return false, types.BlobInfo{}, nil
}

func (d *Destination) createRepositoriesFile(rootLayerID string) error {
4 vendor/github.com/containers/image/docker/tarfile/src.go generated vendored
@ -398,7 +398,9 @@ func (r uncompressedReadCloser) Close() error {
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *Source) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureCachedDataIsPresent(); err != nil {
return nil, 0, err
}
7 vendor/github.com/containers/image/image/docker_schema2.go generated vendored
@ -11,6 +11,7 @@ import (

"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -95,7 +96,7 @@ func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor))
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), blobinfocache.NoCache)
if err != nil {
return nil, err
}
@ -249,7 +250,9 @@ func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, dest typ
if historyEntry.EmptyLayer {
if !haveGzippedEmptyLayer {
logrus.Debugf("Uploading empty layer during conversion to schema 1")
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, false)
// Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here,
// and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it.
info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))}, blobinfocache.NoCache, false)
if err != nil {
return nil, errors.Wrap(err, "Error uploading empty layer")
}
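blobinfocache.NoCache, used here and in the docker destination's PutBlob above, is not defined in this diff. A reasonable reading of the name is a BlobInfoCache that remembers nothing, along these lines (an assumption for illustration, not the upstream source):

type noCache struct{}

func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest { return "" } // never a known pairing
func (noCache) RecordDigestUncompressedPair(anyDigest, uncompressed digest.Digest) {} // drop the record
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
} // drop the record
func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
    return nil // never any reuse candidates
}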
3 vendor/github.com/containers/image/image/oci.go generated vendored
@ -7,6 +7,7 @@ import (

"github.com/containers/image/docker/reference"
"github.com/containers/image/manifest"
"github.com/containers/image/pkg/blobinfocache"
"github.com/containers/image/types"
"github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
@ -60,7 +61,7 @@ func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
if m.src == nil {
return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
}
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config))
stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), blobinfocache.NoCache)
if err != nil {
return nil, err
}
27 vendor/github.com/containers/image/oci/archive/oci_dest.go generated vendored
@ -77,20 +77,27 @@ func (d *ociArchiveImageDestination) IgnoresEmbeddedDockerReference() bool {
return d.unpackedDest.IgnoresEmbeddedDockerReference()
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
return d.unpackedDest.PutBlob(ctx, stream, inputInfo, isConfig)
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociArchiveImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
return d.unpackedDest.PutBlob(ctx, stream, inputInfo, cache, isConfig)
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob
func (d *ociArchiveImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
return d.unpackedDest.HasBlob(ctx, info)
}

func (d *ociArchiveImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return d.unpackedDest.ReapplyBlob(ctx, info)
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ociArchiveImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
return d.unpackedDest.TryReusingBlob(ctx, info, cache, canSubstitute)
}

// PutManifest writes manifest to the destination
8 vendor/github.com/containers/image/oci/archive/oci_src.go generated vendored
@ -76,9 +76,11 @@ func (s *ociArchiveImageSource) GetManifest(ctx context.Context, instanceDigest
return s.unpackedSrc.GetManifest(ctx, instanceDigest)
}

// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
return s.unpackedSrc.GetBlob(ctx, info)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ociArchiveImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
return s.unpackedSrc.GetBlob(ctx, info, cache)
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
33 vendor/github.com/containers/image/oci/layout/oci_dest.go generated vendored
@ -107,13 +107,15 @@ func (d *ociImageDestination) IgnoresEmbeddedDockerReference() bool {
return false // N/A, DockerReference() returns nil.
}

// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob")
if err != nil {
return types.BlobInfo{}, err
@ -173,30 +175,29 @@ func (d *ociImageDestination) PutBlob(ctx context.Context, stream io.Reader, inp
return types.BlobInfo{Digest: computedDigest, Size: size}, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *ociImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ociImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
if info.Digest == "" {
return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
}
blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir)
if err != nil {
return false, -1, err
return false, types.BlobInfo{}, err
}
finfo, err := os.Stat(blobPath)
if err != nil && os.IsNotExist(err) {
return false, -1, nil
return false, types.BlobInfo{}, nil
}
if err != nil {
return false, -1, err
return false, types.BlobInfo{}, err
}
return true, finfo.Size(), nil
}

func (d *ociImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return info, nil
return true, types.BlobInfo{Digest: info.Digest, Size: finfo.Size()}, nil
}

// PutManifest writes manifest to the destination.
6 vendor/github.com/containers/image/oci/layout/oci_src.go generated vendored
@ -92,8 +92,10 @@ func (s *ociImageSource) GetManifest(ctx context.Context, instanceDigest *digest
return m, mimeType, nil
}

// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ociImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if len(info.URLs) != 0 {
return s.getExternalBlob(ctx, info.URLs)
}
30 vendor/github.com/containers/image/openshift/openshift.go generated vendored
@ -212,11 +212,13 @@ func (s *openshiftImageSource) GetManifest(ctx context.Context, instanceDigest *
}

// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *openshiftImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
if err := s.ensureImageIsResolved(ctx); err != nil {
return nil, 0, err
}
return s.docker.GetBlob(ctx, info)
return s.docker.GetBlob(ctx, info, cache)
}

// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
@ -379,23 +381,23 @@ func (d *openshiftImageDestination) IgnoresEmbeddedDockerReference() bool {
// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
return d.docker.PutBlob(ctx, stream, inputInfo, isConfig)
func (d *openshiftImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
return d.docker.PutBlob(ctx, stream, inputInfo, cache, isConfig)
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (d *openshiftImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {
return d.docker.HasBlob(ctx, info)
}

func (d *openshiftImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
return d.docker.ReapplyBlob(ctx, info)
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *openshiftImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
return d.docker.TryReusingBlob(ctx, info, cache, canSubstitute)
}

// PutManifest writes manifest to the destination.
36
vendor/github.com/containers/image/ostree/ostree_dest.go
generated
vendored
@ -132,7 +132,15 @@ func (d *ostreeImageDestination) IgnoresEmbeddedDockerReference() bool {
    return false // N/A, DockerReference() returns nil.
}

func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (d *ostreeImageDestination) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    tmpDir, err := ioutil.TempDir(d.tmpDirPath, "blob")
    if err != nil {
        return types.BlobInfo{}, err
@ -322,12 +330,18 @@ func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobTo
    return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)})
}

func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInfo) (bool, int64, error) {

// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (d *ostreeImageDestination) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
    if d.repo == nil {
        repo, err := openRepo(d.ref.repo)
        if err != nil {
            return false, 0, err
            return false, types.BlobInfo{}, err
        }
        d.repo = repo
    }
@ -335,29 +349,25 @@ func (d *ostreeImageDestination) HasBlob(ctx context.Context, info types.BlobInf
    found, data, err := readMetadata(d.repo, branch, "docker.uncompressed_digest")
    if err != nil || !found {
        return found, -1, err
        return found, types.BlobInfo{}, err
    }

    found, data, err = readMetadata(d.repo, branch, "docker.uncompressed_size")
    if err != nil || !found {
        return found, -1, err
        return found, types.BlobInfo{}, err
    }

    found, data, err = readMetadata(d.repo, branch, "docker.size")
    if err != nil || !found {
        return found, -1, err
        return found, types.BlobInfo{}, err
    }

    size, err := strconv.ParseInt(data, 10, 64)
    if err != nil {
        return false, -1, err
        return false, types.BlobInfo{}, err
    }

    return true, size, nil
}

func (d *ostreeImageDestination) ReapplyBlob(ctx context.Context, info types.BlobInfo) (types.BlobInfo, error) {
    return info, nil
    return true, types.BlobInfo{Digest: info.Digest, Size: size}, nil
}

// PutManifest writes manifest to the destination.

6
vendor/github.com/containers/image/ostree/ostree_src.go
generated
vendored
@ -255,8 +255,10 @@ func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser,
    return getter.Get(path)
}

// GetBlob returns a stream for the specified blob, and the blob's size.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (io.ReadCloser, int64, error) {
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *ostreeImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {

    blob := info.Digest.Hex()

329
vendor/github.com/containers/image/pkg/blobinfocache/boltdb.go
generated
vendored
Normal file
@ -0,0 +1,329 @@
package blobinfocache

import (
    "fmt"
    "os"
    "sync"
    "time"

    "github.com/boltdb/bolt"
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    "github.com/sirupsen/logrus"
)

var (
    // NOTE: There is no versioning data inside the file; this is a “cache”, so on an incompatible format upgrade
    // we can simply start over with a different filename; update blobInfoCacheFilename.

    // FIXME: For CRI-O, does this need to hide information between different users?

    // uncompressedDigestBucket stores a mapping from any digest to an uncompressed digest.
    uncompressedDigestBucket = []byte("uncompressedDigest")
    // digestByUncompressedBucket stores a bucket per uncompressed digest, with the bucket containing a set of digests for that uncompressed digest
    // (as a set of key=digest, value="" pairs)
    digestByUncompressedBucket = []byte("digestByUncompressed")
    // knownLocationsBucket stores a nested structure of buckets, keyed by (transport name, scope string, blob digest), ultimately containing
    // a bucket of (opaque location reference, BinaryMarshaller-encoded time.Time value).
    knownLocationsBucket = []byte("knownLocations")
)

// Concurrency:
// See https://www.sqlite.org/src/artifact/c230a7a24?ln=994-1081 for all the issues with locks, which make it extremely
// difficult to use a single BoltDB file from multiple threads/goroutines inside a process. So, we punt and only allow one at a time.

// pathLock contains a lock for a specific BoltDB database path.
type pathLock struct {
    refCount int64      // Number of threads/goroutines owning or waiting on this lock. Protected by global pathLocksMutex, NOT by the mutex field below!
    mutex    sync.Mutex // Owned by the thread/goroutine allowed to access the BoltDB database.
}

var (
    // pathLocks contains a lock for each currently open file.
    // This must be global so that independently created instances of boltDBCache exclude each other.
    // The map is protected by pathLocksMutex.
    // FIXME? Should this be based on device:inode numbers instead of paths?
    pathLocks      = map[string]*pathLock{}
    pathLocksMutex = sync.Mutex{}
)

// lockPath obtains the pathLock for path.
// The caller must call unlockPath eventually.
func lockPath(path string) {
    pl := func() *pathLock { // A scope for defer
        pathLocksMutex.Lock()
        defer pathLocksMutex.Unlock()
        pl, ok := pathLocks[path]
        if ok {
            pl.refCount++
        } else {
            pl = &pathLock{refCount: 1, mutex: sync.Mutex{}}
            pathLocks[path] = pl
        }
        return pl
    }()
    pl.mutex.Lock()
}

// unlockPath releases the pathLock for path.
func unlockPath(path string) {
    pathLocksMutex.Lock()
    defer pathLocksMutex.Unlock()
    pl, ok := pathLocks[path]
    if !ok {
        // Should this return an error instead? BlobInfoCache ultimately ignores errors…
        panic(fmt.Sprintf("Internal error: unlocking nonexistent lock for path %s", path))
    }
    pl.mutex.Unlock()
    pl.refCount--
    if pl.refCount == 0 {
        delete(pathLocks, path)
    }
}

// boltDBCache is a BlobInfoCache implementation which uses a BoltDB file at the specified path.
//
// Note that we don’t keep the database open across operations, because that would lock the file and block any other
// users; instead, we need to open/close it for every single write or lookup.
type boltDBCache struct {
    path string
}

// NewBoltDBCache returns a BlobInfoCache implementation which uses a BoltDB file at path.
// Most users should call DefaultCache instead.
func NewBoltDBCache(path string) types.BlobInfoCache {
    return &boltDBCache{path: path}
}

// view runs the specified fn within a read-only transaction on the database.
func (bdc *boltDBCache) view(fn func(tx *bolt.Tx) error) (retErr error) {
    // bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true}) will, if the file does not exist,
    // nevertheless create it, but with an O_RDONLY file descriptor, try to initialize it, and fail — while holding
    // a read lock, blocking any future writes.
    // Hence this preliminary check, which is RACY: Another process could remove the file
    // between the Lstat call and opening the database.
    if _, err := os.Lstat(bdc.path); err != nil && os.IsNotExist(err) {
        return err
    }

    lockPath(bdc.path)
    defer unlockPath(bdc.path)
    db, err := bolt.Open(bdc.path, 0600, &bolt.Options{ReadOnly: true})
    if err != nil {
        return err
    }
    defer func() {
        if err := db.Close(); retErr == nil && err != nil {
            retErr = err
        }
    }()

    return db.View(fn)
}

// update runs the specified fn within a read-write transaction on the database.
func (bdc *boltDBCache) update(fn func(tx *bolt.Tx) error) (retErr error) {
    lockPath(bdc.path)
    defer unlockPath(bdc.path)
    db, err := bolt.Open(bdc.path, 0600, nil)
    if err != nil {
        return err
    }
    defer func() {
        if err := db.Close(); retErr == nil && err != nil {
            retErr = err
        }
    }()

    return db.Update(fn)
}

// uncompressedDigest implements BlobInfoCache.UncompressedDigest within the provided read-only transaction.
func (bdc *boltDBCache) uncompressedDigest(tx *bolt.Tx, anyDigest digest.Digest) digest.Digest {
    if b := tx.Bucket(uncompressedDigestBucket); b != nil {
        if uncompressedBytes := b.Get([]byte(anyDigest.String())); uncompressedBytes != nil {
            d, err := digest.Parse(string(uncompressedBytes))
            if err == nil {
                return d
            }
            // FIXME? Log err (but throttle the log volume on repeated accesses)?
        }
    }
    // Presence in digestsByUncompressedBucket implies that anyDigest must already refer to an uncompressed digest.
    // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
    // when we already record a (compressed, uncompressed) pair.
    if b := tx.Bucket(digestByUncompressedBucket); b != nil {
        if b = b.Bucket([]byte(anyDigest.String())); b != nil {
            c := b.Cursor()
            if k, _ := c.First(); k != nil { // The bucket is non-empty
                return anyDigest
            }
        }
    }
    return ""
}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (bdc *boltDBCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
    var res digest.Digest
    if err := bdc.view(func(tx *bolt.Tx) error {
        res = bdc.uncompressedDigest(tx, anyDigest)
        return nil
    }); err != nil { // Including os.IsNotExist(err)
        return "" // FIXME? Log err (but throttle the log volume on repeated accesses)?
    }
    return res
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (bdc *boltDBCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
    _ = bdc.update(func(tx *bolt.Tx) error {
        b, err := tx.CreateBucketIfNotExists(uncompressedDigestBucket)
        if err != nil {
            return err
        }
        key := []byte(anyDigest.String())
        if previousBytes := b.Get(key); previousBytes != nil {
            previous, err := digest.Parse(string(previousBytes))
            if err != nil {
                return err
            }
            if previous != uncompressed {
                logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
            }
        }
        if err := b.Put(key, []byte(uncompressed.String())); err != nil {
            return err
        }

        b, err = tx.CreateBucketIfNotExists(digestByUncompressedBucket)
        if err != nil {
            return err
        }
        b, err = b.CreateBucketIfNotExists([]byte(uncompressed.String()))
        if err != nil {
            return err
        }
        if err := b.Put([]byte(anyDigest.String()), []byte{}); err != nil { // Possibly writing the same []byte{} presence marker again.
            return err
        }
        return nil
    }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (bdc *boltDBCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
    _ = bdc.update(func(tx *bolt.Tx) error {
        b, err := tx.CreateBucketIfNotExists(knownLocationsBucket)
        if err != nil {
            return err
        }
        b, err = b.CreateBucketIfNotExists([]byte(transport.Name()))
        if err != nil {
            return err
        }
        b, err = b.CreateBucketIfNotExists([]byte(scope.Opaque))
        if err != nil {
            return err
        }
        b, err = b.CreateBucketIfNotExists([]byte(blobDigest.String()))
        if err != nil {
            return err
        }
        value, err := time.Now().MarshalBinary()
        if err != nil {
            return err
        }
        if err := b.Put([]byte(location.Opaque), value); err != nil { // Possibly overwriting an older entry.
            return err
        }
        return nil
    }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
}

// appendReplacementCandidates creates candidateWithTime values for digest in scopeBucket, and returns the result of appending them to candidates.
func (bdc *boltDBCache) appendReplacementCandidates(candidates []candidateWithTime, scopeBucket *bolt.Bucket, digest digest.Digest) []candidateWithTime {
    b := scopeBucket.Bucket([]byte(digest.String()))
    if b == nil {
        return candidates
    }
    _ = b.ForEach(func(k, v []byte) error {
        t := time.Time{}
        if err := t.UnmarshalBinary(v); err != nil {
            return err
        }
        candidates = append(candidates, candidateWithTime{
            candidate: types.BICReplacementCandidate{
                Digest:   digest,
                Location: types.BICLocationReference{Opaque: string(k)},
            },
            lastSeen: t,
        })
        return nil
    }) // FIXME? Log error (but throttle the log volume on repeated accesses)?
    return candidates
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (bdc *boltDBCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
    res := []candidateWithTime{}
    var uncompressedDigestValue digest.Digest // = ""
    if err := bdc.view(func(tx *bolt.Tx) error {
        scopeBucket := tx.Bucket(knownLocationsBucket)
        if scopeBucket == nil {
            return nil
        }
        scopeBucket = scopeBucket.Bucket([]byte(transport.Name()))
        if scopeBucket == nil {
            return nil
        }
        scopeBucket = scopeBucket.Bucket([]byte(scope.Opaque))
        if scopeBucket == nil {
            return nil
        }

        res = bdc.appendReplacementCandidates(res, scopeBucket, primaryDigest)
        if canSubstitute {
            if uncompressedDigestValue = bdc.uncompressedDigest(tx, primaryDigest); uncompressedDigestValue != "" {
                b := tx.Bucket(digestByUncompressedBucket)
                if b != nil {
                    b = b.Bucket([]byte(uncompressedDigestValue.String()))
                    if b != nil {
                        if err := b.ForEach(func(k, _ []byte) error {
                            d, err := digest.Parse(string(k))
                            if err != nil {
                                return err
                            }
                            if d != primaryDigest && d != uncompressedDigestValue {
                                res = bdc.appendReplacementCandidates(res, scopeBucket, d)
                            }
                            return nil
                        }); err != nil {
                            return err
                        }
                    }
                }
                if uncompressedDigestValue != primaryDigest {
                    res = bdc.appendReplacementCandidates(res, scopeBucket, uncompressedDigestValue)
                }
            }
        }
        return nil
    }); err != nil { // Including os.IsNotExist(err)
        return []types.BICReplacementCandidate{} // FIXME? Log err (but throttle the log volume on repeated accesses)?
    }

    return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigestValue)
}
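
For orientation, a minimal self-contained sketch (not part of this commit; the file path and digest values are made-up placeholders) exercising the new cache through its public constructor. Each Record/lookup call opens and closes the BoltDB file, as described in the boltDBCache comment above:

package main

import (
    "fmt"
    "strings"

    "github.com/containers/image/pkg/blobinfocache"
    "github.com/opencontainers/go-digest"
)

func main() {
    // NewBoltDBCache creates the file lazily on the first write.
    cache := blobinfocache.NewBoltDBCache("/tmp/example-blob-info-cache.boltdb")
    compressed := digest.Digest("sha256:" + strings.Repeat("aa", 32))
    uncompressed := digest.Digest("sha256:" + strings.Repeat("bb", 32))
    // Only record pairs derived from locally verified data (see the WARNING above).
    cache.RecordDigestUncompressedPair(compressed, uncompressed)
    fmt.Println(cache.UncompressedDigest(compressed)) // prints the uncompressed digest
}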
63
vendor/github.com/containers/image/pkg/blobinfocache/default.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
package blobinfocache

import (
    "fmt"
    "os"
    "path/filepath"

    "github.com/containers/image/types"
    "github.com/sirupsen/logrus"
)

const (
    // blobInfoCacheFilename is the file name used for blob info caches.
    // If the format changes in an incompatible way, increase the version number.
    blobInfoCacheFilename = "blob-info-cache-v1.boltdb"
    // systemBlobInfoCacheDir is the directory containing the blob info cache (in blobInfoCacheFilename) for root-running processes.
    systemBlobInfoCacheDir = "/var/lib/containers/cache"
)

// blobInfoCacheDir returns a path to a blob info cache appropriate for sys and euid.
// euid is used so that (sudo …) does not write root-owned files into the unprivileged users’ home directory.
func blobInfoCacheDir(sys *types.SystemContext, euid int) (string, error) {
    if sys != nil && sys.BlobInfoCacheDir != "" {
        return sys.BlobInfoCacheDir, nil
    }

    // FIXME? On Windows, os.Geteuid() returns -1. What should we do? Right now we treat it as unprivileged
    // and fail (fall back to memory-only) if neither HOME nor XDG_DATA_HOME is set, which is, at least, safe.
    if euid == 0 {
        if sys != nil && sys.RootForImplicitAbsolutePaths != "" {
            return filepath.Join(sys.RootForImplicitAbsolutePaths, systemBlobInfoCacheDir), nil
        }
        return systemBlobInfoCacheDir, nil
    }

    // This is intended to mirror the GraphRoot determination in github.com/containers/libpod/pkg/util.GetRootlessStorageOpts.
    dataDir := os.Getenv("XDG_DATA_HOME")
    if dataDir == "" {
        home := os.Getenv("HOME")
        if home == "" {
            return "", fmt.Errorf("neither XDG_DATA_HOME nor HOME was set non-empty")
        }
        dataDir = filepath.Join(home, ".local", "share")
    }
    return filepath.Join(dataDir, "containers", "cache"), nil
}

// DefaultCache returns the default BlobInfoCache implementation appropriate for sys.
func DefaultCache(sys *types.SystemContext) types.BlobInfoCache {
    dir, err := blobInfoCacheDir(sys, os.Geteuid())
    if err != nil {
        logrus.Debugf("Error determining a location for %s, using a memory-only cache", blobInfoCacheFilename)
        return NewMemoryCache()
    }
    path := filepath.Join(dir, blobInfoCacheFilename)
    if err := os.MkdirAll(dir, 0700); err != nil {
        logrus.Debugf("Error creating parent directories for %s, using a memory-only cache: %v", path, err)
        return NewMemoryCache()
    }

    logrus.Debugf("Using blob info cache at %s", path)
    return NewBoltDBCache(path)
}
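
A short sketch (not part of this commit) of what the resolution above means for callers; the comments restate the three cases handled by blobInfoCacheDir and DefaultCache:

// Sketch only.
cache := blobinfocache.DefaultCache(nil) // a nil *types.SystemContext is accepted
// euid == 0:         /var/lib/containers/cache/blob-info-cache-v1.boltdb
// unprivileged user: $XDG_DATA_HOME/containers/cache/… (or ~/.local/share/containers/cache/…)
// neither HOME nor XDG_DATA_HOME set (non-root): falls back to NewMemoryCache()
_ = cache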
123
vendor/github.com/containers/image/pkg/blobinfocache/memory.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
package blobinfocache

import (
    "time"

    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
    "github.com/sirupsen/logrus"
)

// locationKey only exists to make lookup in knownLocations easier.
type locationKey struct {
    transport  string
    scope      types.BICTransportScope
    blobDigest digest.Digest
}

// memoryCache implements an in-memory-only BlobInfoCache
type memoryCache struct {
    uncompressedDigests   map[digest.Digest]digest.Digest
    digestsByUncompressed map[digest.Digest]map[digest.Digest]struct{}             // stores a set of digests for each uncompressed digest
    knownLocations        map[locationKey]map[types.BICLocationReference]time.Time // stores last known existence time for each location reference
}

// NewMemoryCache returns a BlobInfoCache implementation which is in-memory only.
// This is primarily intended for tests, but also used as a fallback if DefaultCache
// can’t determine, or set up, the location for a persistent cache.
// Manual users of types.{ImageSource,ImageDestination} might also use this instead of a persistent cache.
func NewMemoryCache() types.BlobInfoCache {
    return &memoryCache{
        uncompressedDigests:   map[digest.Digest]digest.Digest{},
        digestsByUncompressed: map[digest.Digest]map[digest.Digest]struct{}{},
        knownLocations:        map[locationKey]map[types.BICLocationReference]time.Time{},
    }
}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (mem *memoryCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
    if d, ok := mem.uncompressedDigests[anyDigest]; ok {
        return d
    }
    // Presence in digestsByUncompressed implies that anyDigest must already refer to an uncompressed digest.
    // This way we don't have to waste storage space with trivial (uncompressed, uncompressed) mappings
    // when we already record a (compressed, uncompressed) pair.
    if m, ok := mem.digestsByUncompressed[anyDigest]; ok && len(m) > 0 {
        return anyDigest
    }
    return ""
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (mem *memoryCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
    if previous, ok := mem.uncompressedDigests[anyDigest]; ok && previous != uncompressed {
        logrus.Warnf("Uncompressed digest for blob %s previously recorded as %s, now %s", anyDigest, previous, uncompressed)
    }
    mem.uncompressedDigests[anyDigest] = uncompressed

    anyDigestSet, ok := mem.digestsByUncompressed[uncompressed]
    if !ok {
        anyDigestSet = map[digest.Digest]struct{}{}
        mem.digestsByUncompressed[uncompressed] = anyDigestSet
    }
    anyDigestSet[anyDigest] = struct{}{} // Possibly writing the same struct{}{} presence marker again.
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (mem *memoryCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
    key := locationKey{transport: transport.Name(), scope: scope, blobDigest: blobDigest}
    locationScope, ok := mem.knownLocations[key]
    if !ok {
        locationScope = map[types.BICLocationReference]time.Time{}
        mem.knownLocations[key] = locationScope
    }
    locationScope[location] = time.Now() // Possibly overwriting an older entry.
}

// appendReplacementCandidates creates candidateWithTime values for (transport, scope, digest), and returns the result of appending them to candidates.
func (mem *memoryCache) appendReplacementCandidates(candidates []candidateWithTime, transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest) []candidateWithTime {
    locations := mem.knownLocations[locationKey{transport: transport.Name(), scope: scope, blobDigest: digest}] // nil if not present
    for l, t := range locations {
        candidates = append(candidates, candidateWithTime{
            candidate: types.BICReplacementCandidate{
                Digest:   digest,
                Location: l,
            },
            lastSeen: t,
        })
    }
    return candidates
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (mem *memoryCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, primaryDigest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
    res := []candidateWithTime{}
    res = mem.appendReplacementCandidates(res, transport, scope, primaryDigest)
    var uncompressedDigest digest.Digest // = ""
    if canSubstitute {
        if uncompressedDigest = mem.UncompressedDigest(primaryDigest); uncompressedDigest != "" {
            otherDigests := mem.digestsByUncompressed[uncompressedDigest] // nil if not present in the map
            for d := range otherDigests {
                if d != primaryDigest && d != uncompressedDigest {
                    res = mem.appendReplacementCandidates(res, transport, scope, d)
                }
            }
            if uncompressedDigest != primaryDigest {
                res = mem.appendReplacementCandidates(res, transport, scope, uncompressedDigest)
            }
        }
    }
    return destructivelyPrioritizeReplacementCandidates(res, primaryDigest, uncompressedDigest)
}
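
The in-memory implementation has the same semantics as the BoltDB one with no file setup, which makes it convenient in tests; a fragment-style sketch (not part of this commit; placeholder digests):

cache := blobinfocache.NewMemoryCache()
compressed := digest.Digest("sha256:" + strings.Repeat("cc", 32))
uncompressed := digest.Digest("sha256:" + strings.Repeat("dd", 32))
cache.RecordDigestUncompressedPair(compressed, uncompressed)
// Both of these hold after recording the locally verified pair:
//   cache.UncompressedDigest(compressed)   == uncompressed
//   cache.UncompressedDigest(uncompressed) == uncompressed (via the digestsByUncompressed set)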
47
vendor/github.com/containers/image/pkg/blobinfocache/none.go
generated
vendored
Normal file
@ -0,0 +1,47 @@
package blobinfocache

import (
    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
)

// noCache implements a dummy BlobInfoCache which records no data.
type noCache struct {
}

// NoCache implements BlobInfoCache by not recording any data.
//
// This exists primarily for implementations of configGetter for Manifest.Inspect,
// because configs only have one representation.
// Any use of BlobInfoCache with blobs should usually use at least a short-lived cache.
var NoCache types.BlobInfoCache = noCache{}

// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
// May return anyDigest if it is known to be uncompressed.
// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
    return ""
}

// RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
// It’s allowed for anyDigest == uncompressed.
// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
}

// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
// and can be reused given the opaque location data.
func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
}

// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
// within the specified (transport scope) (if they still exist, which is not guaranteed).
//
// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
// uncompressed digest.
func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
    return nil
}
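
A small sketch (not part of this commit; src and configInfo are illustrative names) of where the no-op cache fits: a config blob has exactly one representation, so nothing is gained by caching:

// Sketch only.
func fetchConfig(ctx context.Context, src types.ImageSource, configInfo types.BlobInfo) (io.ReadCloser, error) {
    rc, _, err := src.GetBlob(ctx, configInfo, blobinfocache.NoCache) // no lookups recorded or used
    return rc, err
}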
108
vendor/github.com/containers/image/pkg/blobinfocache/prioritize.go
generated
vendored
Normal file
@ -0,0 +1,108 @@
package blobinfocache

import (
    "sort"
    "time"

    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
)

// replacementAttempts is the number of blob replacement candidates returned by destructivelyPrioritizeReplacementCandidates,
// and therefore ultimately by types.BlobInfoCache.CandidateLocations.
// This is a heuristic/guess, and could well use a different value.
const replacementAttempts = 5

// candidateWithTime is the input to types.BICReplacementCandidate prioritization.
type candidateWithTime struct {
    candidate types.BICReplacementCandidate // The replacement candidate
    lastSeen  time.Time                     // Time the candidate was last known to exist (either read or written)
}

// candidateSortState is a local state implementing sort.Interface on candidates to prioritize,
// along with the specially-treated digest values for the implementation of sort.Interface.Less
type candidateSortState struct {
    cs                 []candidateWithTime // The entries to sort
    primaryDigest      digest.Digest       // The digest the user actually asked for
    uncompressedDigest digest.Digest       // The uncompressed digest corresponding to primaryDigest. May be "", or even equal to primaryDigest
}

func (css *candidateSortState) Len() int {
    return len(css.cs)
}

func (css *candidateSortState) Less(i, j int) bool {
    xi := css.cs[i]
    xj := css.cs[j]

    // primaryDigest entries come first, more recent first.
    // uncompressedDigest entries, if uncompressedDigest is set and != primaryDigest, come last, more recent entry first.
    // Other digest values are primarily sorted by time (more recent first), secondarily by digest (to provide a deterministic order)

    // First, deal with the primaryDigest/uncompressedDigest cases:
    if xi.candidate.Digest != xj.candidate.Digest {
        // - The two digests are different, and one (or both) of the digests is primaryDigest or uncompressedDigest: time does not matter
        if xi.candidate.Digest == css.primaryDigest {
            return true
        }
        if xj.candidate.Digest == css.primaryDigest {
            return false
        }
        if css.uncompressedDigest != "" {
            if xi.candidate.Digest == css.uncompressedDigest {
                return false
            }
            if xj.candidate.Digest == css.uncompressedDigest {
                return true
            }
        }
    } else { // xi.candidate.Digest == xj.candidate.Digest
        // The two digests are the same, and are either primaryDigest or uncompressedDigest: order by time
        if xi.candidate.Digest == css.primaryDigest || (css.uncompressedDigest != "" && xi.candidate.Digest == css.uncompressedDigest) {
            return xi.lastSeen.After(xj.lastSeen)
        }
    }

    // Neither of the digests are primaryDigest/uncompressedDigest:
    if !xi.lastSeen.Equal(xj.lastSeen) { // Order primarily by time
        return xi.lastSeen.After(xj.lastSeen)
    }
    // Fall back to digest, if timestamps end up _exactly_ the same (how?!)
    return xi.candidate.Digest < xj.candidate.Digest
}

func (css *candidateSortState) Swap(i, j int) {
    css.cs[i], css.cs[j] = css.cs[j], css.cs[i]
}

// destructivelyPrioritizeReplacementCandidatesWithMax is destructivelyPrioritizeReplacementCandidates with a parameter for the
// number of entries to limit, only to make testing simpler.
func destructivelyPrioritizeReplacementCandidatesWithMax(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest, maxCandidates int) []types.BICReplacementCandidate {
    // We don't need to use sort.Stable() because nanosecond timestamps are (presumably?) unique, so no two elements should
    // compare equal.
    sort.Sort(&candidateSortState{
        cs:                 cs,
        primaryDigest:      primaryDigest,
        uncompressedDigest: uncompressedDigest,
    })

    resLength := len(cs)
    if resLength > maxCandidates {
        resLength = maxCandidates
    }
    res := make([]types.BICReplacementCandidate, resLength)
    for i := range res {
        res[i] = cs[i].candidate
    }
    return res
}

// destructivelyPrioritizeReplacementCandidates consumes AND DESTROYS an array of possible replacement candidates with their last known existence times,
// the primary digest the user actually asked for, and the corresponding uncompressed digest (if known, possibly equal to the primary digest),
// and returns an appropriately prioritized and/or trimmed result suitable for a return value from types.BlobInfoCache.CandidateLocations.
//
// WARNING: The array of candidates is destructively modified. (The implementation of this function could of course
// make a copy, but all CandidateLocations implementations build the slice of candidates only for the single purpose of calling this function anyway.)
func destructivelyPrioritizeReplacementCandidates(cs []candidateWithTime, primaryDigest, uncompressedDigest digest.Digest) []types.BICReplacementCandidate {
    return destructivelyPrioritizeReplacementCandidatesWithMax(cs, primaryDigest, uncompressedDigest, replacementAttempts)
}
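
To make the resulting order concrete, a test-style sketch (not part of this commit; it would sit in the same package, and the digests are placeholders) exercising destructivelyPrioritizeReplacementCandidatesWithMax:

package blobinfocache

import (
    "strings"
    "testing"
    "time"

    "github.com/containers/image/types"
    "github.com/opencontainers/go-digest"
)

func TestPrioritizeSketch(t *testing.T) {
    primary := digest.Digest("sha256:" + strings.Repeat("11", 32))
    uncompressed := digest.Digest("sha256:" + strings.Repeat("22", 32))
    other := digest.Digest("sha256:" + strings.Repeat("33", 32))
    now := time.Now()
    cs := []candidateWithTime{
        {candidate: types.BICReplacementCandidate{Digest: other, Location: types.BICLocationReference{Opaque: "o"}}, lastSeen: now},
        {candidate: types.BICReplacementCandidate{Digest: uncompressed, Location: types.BICLocationReference{Opaque: "u"}}, lastSeen: now.Add(time.Hour)},
        {candidate: types.BICReplacementCandidate{Digest: primary, Location: types.BICLocationReference{Opaque: "p"}}, lastSeen: now.Add(-time.Hour)},
    }
    res := destructivelyPrioritizeReplacementCandidatesWithMax(cs, primary, uncompressed, 5)
    // Despite being the oldest entry, the primary digest sorts first; the uncompressed
    // digest sorts last even though it is the most recent.
    if res[0].Digest != primary || res[2].Digest != uncompressed {
        t.Fatalf("unexpected order: %#v", res)
    }
}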
108
vendor/github.com/containers/image/storage/storage_image.go
generated
vendored
@ -16,6 +16,7 @@ import (
    "github.com/containers/image/image"
    "github.com/containers/image/internal/tmpdir"
    "github.com/containers/image/manifest"
    "github.com/containers/image/pkg/blobinfocache"
    "github.com/containers/image/types"
    "github.com/containers/storage"
    "github.com/containers/storage/pkg/archive"
@ -99,8 +100,10 @@ func (s storageImageSource) Close() error {
    return nil
}

// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo) (rc io.ReadCloser, n int64, err error) {
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (s *storageImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (rc io.ReadCloser, n int64, err error) {
    if info.Digest == image.GzippedEmptyLayerDigest {
        return ioutil.NopCloser(bytes.NewReader(image.GzippedEmptyLayer)), int64(len(image.GzippedEmptyLayer)), nil
    }
@ -317,9 +320,17 @@ func (s *storageImageDestination) computeNextBlobCacheFile() string {
    return filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1)))
}

// PutBlob stores a layer or data blob in our temporary directory, checking that any information
// in the blobinfo matches the incoming data.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, isConfig bool) (types.BlobInfo, error) {
// PutBlob writes contents of stream and returns data representing the result.
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader, blobinfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
    // Stores a layer or data blob in our temporary directory, checking that any information
    // in the blobinfo matches the incoming data.
    errorBlobInfo := types.BlobInfo{
        Digest: "",
        Size:   -1,
@ -370,6 +381,8 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    if blobSize < 0 {
        blobSize = counter.Count
    }
    // This is safe because we have just computed both values ourselves.
    cache.RecordDigestUncompressedPair(blobDigest, diffID.Digest())
    return types.BlobInfo{
        Digest: blobDigest,
        Size:   blobSize,
@ -377,59 +390,82 @@ func (s *storageImageDestination) PutBlob(ctx context.Context, stream io.Reader,
    }, nil
}

// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be
// reapplied using ReapplyBlob.
//
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
func (s *storageImageDestination) HasBlob(ctx context.Context, blobinfo types.BlobInfo) (bool, int64, error) {
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
func (s *storageImageDestination) TryReusingBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
    if blobinfo.Digest == "" {
        return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`)
        return false, types.BlobInfo{}, errors.Errorf(`Can not check for a blob with unknown digest`)
    }
    if err := blobinfo.Digest.Validate(); err != nil {
        return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
        return false, types.BlobInfo{}, errors.Wrapf(err, `Can not check for a blob with invalid digest`)
    }

    // Check if we've already cached it in a file.
    if size, ok := s.fileSizes[blobinfo.Digest]; ok {
        return true, size, nil
        return true, types.BlobInfo{
            Digest:    blobinfo.Digest,
            Size:      size,
            MediaType: blobinfo.MediaType,
        }, nil
    }

    // Check if we have a wasn't-compressed layer in storage that's based on that blob.
    layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest)
    if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
        return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
        return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest)
    }
    if len(layers) > 0 {
        // Save this for completeness.
        s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
        return true, layers[0].UncompressedSize, nil
        return true, types.BlobInfo{
            Digest:    blobinfo.Digest,
            Size:      layers[0].UncompressedSize,
            MediaType: blobinfo.MediaType,
        }, nil
    }

    // Check if we have a was-compressed layer in storage that's based on that blob.
    layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest)
    if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
        return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
        return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest)
    }
    if len(layers) > 0 {
        // Record the uncompressed value so that we can use it to calculate layer IDs.
        s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest
        return true, layers[0].CompressedSize, nil
        return true, types.BlobInfo{
            Digest:    blobinfo.Digest,
            Size:      layers[0].CompressedSize,
            MediaType: blobinfo.MediaType,
        }, nil
    }
    // Nope, we don't have it.
    return false, -1, nil
}

// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the
// same one when it walks the list in the manifest.
func (s *storageImageDestination) ReapplyBlob(ctx context.Context, blobinfo types.BlobInfo) (types.BlobInfo, error) {
    present, size, err := s.HasBlob(ctx, blobinfo)
    if !present {
        return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo)
    // Does the blob correspond to a known DiffID which we already have available?
    // Because we must return the size, which is unknown for unavailable compressed blobs, the returned BlobInfo refers to the
    // uncompressed layer, and that can happen only if canSubstitute.
    if canSubstitute {
        if uncompressedDigest := cache.UncompressedDigest(blobinfo.Digest); uncompressedDigest != "" && uncompressedDigest != blobinfo.Digest {
            layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(uncompressedDigest)
            if err != nil && errors.Cause(err) != storage.ErrLayerUnknown {
                return false, types.BlobInfo{}, errors.Wrapf(err, `Error looking for layers with digest %q`, uncompressedDigest)
            }
            if len(layers) > 0 {
                s.blobDiffIDs[uncompressedDigest] = layers[0].UncompressedDigest
                return true, types.BlobInfo{
                    Digest:    uncompressedDigest,
                    Size:      layers[0].UncompressedSize,
                    MediaType: blobinfo.MediaType,
                }, nil
            }
        }
    }
    if err != nil {
        return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo)
    }
    blobinfo.Size = size
    return blobinfo, nil

    // Nope, we don't have it.
    return false, types.BlobInfo{}, nil
}

// computeID computes a recommended image ID based on information we have so far. If
@ -514,8 +550,12 @@ func (s *storageImageDestination) Commit(ctx context.Context) error {
        if !haveDiffID {
            // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(),
            // or to even check if we had it.
            // Use blobinfocache.NoCache to avoid a repeated DiffID lookup in the BlobInfoCache; a caller
            // that relies on using a blob digest that has never been seen by the store had better call
            // TryReusingBlob; not calling PutBlob already violates the documented API, so there’s only
            // so far we are going to accommodate that (if we should be doing that at all).
            logrus.Debugf("looking for diffID for blob %+v", blob.Digest)
            has, _, err := s.HasBlob(ctx, blob.BlobInfo)
            has, _, err := s.TryReusingBlob(ctx, blob.BlobInfo, blobinfocache.NoCache, false)
            if err != nil {
                return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String())
            }
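
For context, a sketch (not part of this commit; copyLayer and the parameter names are illustrative) of the calling pattern the new destination API is designed for: try reuse first, stream the blob only as a fallback. It assumes the usual context/digest/types imports:

func copyLayer(ctx context.Context, src types.ImageSource, dest types.ImageDestination,
    d digest.Digest, cache types.BlobInfoCache) (types.BlobInfo, error) {
    // Ask the destination to reuse a matching blob, or (canSubstitute=true) an equivalent one.
    reused, info, err := dest.TryReusingBlob(ctx, types.BlobInfo{Digest: d, Size: -1}, cache, true)
    if err != nil {
        return types.BlobInfo{}, err
    }
    if reused {
        return info, nil // Skip the copy entirely.
    }
    // Fall back to streaming the blob; both calls feed the same cache.
    stream, size, err := src.GetBlob(ctx, types.BlobInfo{Digest: d, Size: -1}, cache)
    if err != nil {
        return types.BlobInfo{}, err
    }
    defer stream.Close()
    return dest.PutBlob(ctx, stream, types.BlobInfo{Digest: d, Size: size}, cache, false)
}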
5
vendor/github.com/containers/image/tarball/tarball_src.go
generated
vendored
@ -207,7 +207,10 @@ func (is *tarballImageSource) Close() error {
    return nil
}

func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo) (io.ReadCloser, int64, error) {
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
func (is *tarballImageSource) GetBlob(ctx context.Context, blobinfo types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
    // We should only be asked about things in the manifest. Maybe the configuration blob.
    if blobinfo.Digest == is.configID {
        return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil
102
vendor/github.com/containers/image/types/types.go
generated
vendored
@ -100,6 +100,82 @@ type BlobInfo struct {
    MediaType string
}

// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present.
// BlobInfoCache.RecordKnownLocation / BlobInfoCache.CandidateLocations record data about blobs keyed by (scope, digest).
// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable.
//
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
// at least by not failing hard when encountering unknown data.
type BICTransportScope struct {
    Opaque string
}

// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope.
// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob
// can look it up using BlobInfoCache.CandidateLocations.
//
// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different
// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility,
// at least by not failing hard when encountering unknown data.
type BICLocationReference struct {
    Opaque string
}

// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations.
type BICReplacementCandidate struct {
    Digest   digest.Digest
    Location BICLocationReference
}

// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
//
// It records two kinds of data:
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
//   One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
//   This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
//   or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
//
//   It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
//   to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
//
//   This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
//   compress/decompress blobs for their own purposes.
//
// - Known blob locations, managed by individual transports:
//   The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
//   recording transport-specific information that allows the transport to reuse the blob in the future;
//   then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
//
//   Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
//   can be directly reused within a registry, or mounted across registries within a registry server.)
//
// None of the methods return an error indication: errors when reading from, or writing to, the cache should never be fatal;
// users of the cache should just fall back to copying the blobs the usual way.
type BlobInfoCache interface {
    // UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
    // May return anyDigest if it is known to be uncompressed.
    // Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
    UncompressedDigest(anyDigest digest.Digest) digest.Digest
    // RecordDigestUncompressedPair records that the uncompressed version of anyDigest is uncompressed.
    // It’s allowed for anyDigest == uncompressed.
    // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
    // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
    // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
    RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)

    // RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
    // and can be reused given the opaque location data.
    RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
    // CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
    // within the specified (transport scope) (if they still exist, which is not guaranteed).
    //
    // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
    // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
    // uncompressed digest.
    CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate
}
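
To see the interface end to end, a sketch (not part of this commit; stubTransport and all values are invented, and it assumes the usual three-method types.ImageTransport interface) recording and querying a location through the in-memory implementation:

type stubTransport struct{}

func (stubTransport) Name() string                                                  { return "stub" }
func (stubTransport) ParseReference(reference string) (types.ImageReference, error) { return nil, nil }
func (stubTransport) ValidatePolicyConfigurationScope(scope string) error           { return nil }

func demoCandidateLocations() {
    cache := blobinfocache.NewMemoryCache()
    d := digest.Digest("sha256:" + strings.Repeat("ab", 32))
    scope := types.BICTransportScope{Opaque: "registry.example.com"}
    cache.RecordKnownLocation(stubTransport{}, scope, d, types.BICLocationReference{Opaque: "library/app"})
    for _, c := range cache.CandidateLocations(stubTransport{}, scope, d, false) {
        fmt.Println(c.Digest, c.Location.Opaque) // one candidate: d at "library/app"
    }
}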
// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list).
// This is primarily useful for copying images around; for examining their properties, Image (below)
// is usually more useful.
@ -120,7 +196,8 @@ type ImageSource interface {
|
||||
GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error)
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
GetBlob(context.Context, BlobInfo) (io.ReadCloser, int64, error)
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error)
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
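A hedged sketch of the “may update BlobInfoCache” note above: a transport-side GetBlob that records where a blob was actually found, so later TryReusingBlob calls can offer it as a candidate. `exampleSource` and its `fetch` helper are hypothetical; only the cache interaction follows the API in this diff:

```go
package example

import (
	"context"
	"io"

	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

// exampleSource is a hypothetical transport; fetch stands in for whatever
// transport-specific retrieval the real implementation would do.
type exampleSource struct {
	transport types.ImageTransport
	scope     types.BICTransportScope
	fetch     func(ctx context.Context, d digest.Digest) (io.ReadCloser, int64, types.BICLocationReference, error)
}

// GetBlob fetches the blob and, only once the fetch has demonstrably
// succeeded, records the location for future reuse lookups.
func (s *exampleSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
	rc, size, loc, err := s.fetch(ctx, info.Digest)
	if err != nil {
		return nil, -1, err
	}
	cache.RecordKnownLocation(s.transport, s.scope, info.Digest, loc)
	return rc, size, nil
}
```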
@ -148,8 +225,7 @@ const (
// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
//
// There is a specific required order for some of the calls:
// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
//
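A compact sketch of that required ordering; `copyOneImage` and its parameters are hypothetical, and error handling is trimmed so the blobs-then-manifest-then-signatures-then-Commit sequence stays visible (config-blob handling is omitted):

```go
package example

import (
	"context"

	"github.com/containers/image/types"
)

// copyOneImage is a hypothetical driver loop illustrating the call order
// required by ImageDestination.
func copyOneImage(ctx context.Context, src types.ImageSource, dest types.ImageDestination,
	cache types.BlobInfoCache, blobs []types.BlobInfo, manifest []byte, sigs [][]byte) error {
	for _, b := range blobs {
		reused, _, err := dest.TryReusingBlob(ctx, b, cache, true)
		if err != nil {
			return err
		}
		if reused {
			continue // already present at the destination; nothing to upload
		}
		rc, size, err := src.GetBlob(ctx, b, cache)
		if err != nil {
			return err
		}
		_, err = dest.PutBlob(ctx, rc, types.BlobInfo{Digest: b.Digest, Size: size}, cache, false)
		rc.Close()
		if err != nil {
			return err
		}
	}
	if err := dest.PutManifest(ctx, manifest); err != nil { // only after all blobs
		return err
	}
	if err := dest.PutSignatures(ctx, sigs); err != nil { // only after the manifest
		return err
	}
	return dest.Commit(ctx) // finally, persist the assembled image
}
```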
@ -183,17 +259,19 @@ type ImageDestination interface {
// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
// inputInfo.Size is the expected length of stream, if known.
// inputInfo.MediaType describes the blob format, if known.
// May update cache.
// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
// to any other readers for download using the supplied digest.
// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, isConfig bool) (BlobInfo, error)
// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
// it returns a non-nil error only on an unexpected failure.
HasBlob(ctx context.Context, info BlobInfo) (bool, int64, error)
// ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
ReapplyBlob(ctx context.Context, info BlobInfo) (BlobInfo, error)
PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
// info.Digest must not be empty.
// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size.
// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
// May use and/or update cache.
TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
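A hedged sketch of a destination-side TryReusingBlob built on CandidateLocations; `exampleDest` and its `blobExistsAt` probe are hypothetical, and a real transport would also handle cross-repository mounting, size lookups, and similar details:

```go
package example

import (
	"context"
	"errors"

	"github.com/containers/image/types"
	digest "github.com/opencontainers/go-digest"
)

// exampleDest is a hypothetical destination transport; only the
// BlobInfoCache calls reflect the API added in this diff.
type exampleDest struct {
	transport    types.ImageTransport
	scope        types.BICTransportScope
	blobExistsAt func(ctx context.Context, d digest.Digest, loc types.BICLocationReference) (int64, bool, error)
}

func (d *exampleDest) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) {
	if info.Digest == "" {
		return false, types.BlobInfo{}, errors.New("digest must not be empty")
	}
	// Ask the cache for previously recorded locations, possibly widened to
	// compressed/uncompressed variants when canSubstitute is set.
	for _, candidate := range cache.CandidateLocations(d.transport, d.scope, info.Digest, canSubstitute) {
		size, ok, err := d.blobExistsAt(ctx, candidate.Digest, candidate.Location) // hypothetical existence probe
		if err != nil || !ok {
			continue // stale cache entry; try the next candidate
		}
		// Re-confirm the location so it stays fresh in the cache.
		cache.RecordKnownLocation(d.transport, d.scope, candidate.Digest, candidate.Location)
		return true, types.BlobInfo{Digest: candidate.Digest, Size: size}, nil
	}
	return false, types.BlobInfo{}, nil // not reusable; the caller falls back to PutBlob
}
```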
// PutManifest writes manifest to the destination.
// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
@ -375,6 +453,8 @@ type SystemContext struct {
ArchitectureChoice string
// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
OSChoice string
// If not "", overrides the system's default directory containing a blob info cache.
BlobInfoCacheDir string

// Additional tags when creating or copying a docker-archive.
DockerArchiveAdditionalTags []reference.NamedTagged
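A small, hedged sketch of wiring the new field: only BlobInfoCacheDir is the field added by this diff, and the directory path is made up for illustration:

```go
package example

import "github.com/containers/image/types"

// newSystemContext shows the new knob; when BlobInfoCacheDir is left "",
// the library's default cache location is used instead.
func newSystemContext() *types.SystemContext {
	return &types.SystemContext{
		BlobInfoCacheDir: "/var/lib/example/blob-info-cache", // illustrative path only
	}
}
```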
1
vendor/github.com/containers/image/vendor.conf
generated
vendored
@ -43,3 +43,4 @@ github.com/syndtr/gocapability master
github.com/Microsoft/go-winio ab35fc04b6365e8fcb18e6e9e41ea4a02b10b175
github.com/Microsoft/hcsshim eca7177590cdcbd25bbc5df27e3b693a54b53a6a
github.com/ulikunitz/xz v0.5.4
github.com/boltdb/bolt master
277
vendor/github.com/containers/storage/drivers/copy/copy.go
generated
vendored
Normal file
@ -0,0 +1,277 @@
// +build linux

package copy

/*
#include <linux/fs.h>

#ifndef FICLONE
#define FICLONE _IOW(0x94, 9, int)
#endif
*/
import "C"
import (
"container/list"
"fmt"
"io"
"os"
"path/filepath"
"syscall"
"time"

"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"golang.org/x/sys/unix"
)

// Mode indicates whether to use hardlink or copy content
type Mode int

const (
// Content creates a new file, and copies the content of the file
Content Mode = iota
// Hardlink creates a new hardlink to the existing file
Hardlink
)

func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
srcFile, err := os.Open(srcPath)
if err != nil {
return err
}
defer srcFile.Close()

// If the destination file already exists, we shouldn't blow it away
dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
if err != nil {
return err
}
defer dstFile.Close()

if *copyWithFileClone {
_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
if err == nil {
return nil
}

*copyWithFileClone = false
if err == unix.EXDEV {
*copyWithFileRange = false
}
}
if *copyWithFileRange {
err = doCopyWithFileRange(srcFile, dstFile, fileinfo)
// Trying the file_clone may not have caught the exdev case
// as the ioctl may not have been available (therefore EINVAL)
if err == unix.EXDEV || err == unix.ENOSYS {
*copyWithFileRange = false
} else {
return err
}
}
return legacyCopy(srcFile, dstFile)
}

func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
amountLeftToCopy := fileinfo.Size()

for amountLeftToCopy > 0 {
n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0)
if err != nil {
return err
}

amountLeftToCopy = amountLeftToCopy - int64(n)
}

return nil
}

func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
_, err := pools.Copy(dstFile, srcFile)

return err
}

func copyXattr(srcPath, dstPath, attr string) error {
data, err := system.Lgetxattr(srcPath, attr)
if err != nil {
return err
}
if data != nil {
if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
return err
}
}
return nil
}

type fileID struct {
dev uint64
ino uint64
}

type dirMtimeInfo struct {
dstPath *string
stat *syscall.Stat_t
}

// DirCopy copies or hardlinks the contents of one directory to another,
// properly handling xattrs, and soft links
//
// Copying xattrs can be opted out of by passing false for copyXattrs.
func DirCopy(srcDir, dstDir string, copyMode Mode, copyXattrs bool) error {
copyWithFileRange := true
copyWithFileClone := true

// This is a map of source file inodes to dst file paths
copiedFiles := make(map[fileID]string)

dirsToSetMtimes := list.New()
err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
if err != nil {
return err
}

// Rebase path
relPath, err := filepath.Rel(srcDir, srcPath)
if err != nil {
return err
}

dstPath := filepath.Join(dstDir, relPath)
if err != nil {
return err
}

stat, ok := f.Sys().(*syscall.Stat_t)
if !ok {
return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
}

isHardlink := false

switch f.Mode() & os.ModeType {
case 0: // Regular file
id := fileID{dev: stat.Dev, ino: stat.Ino}
if copyMode == Hardlink {
isHardlink = true
if err2 := os.Link(srcPath, dstPath); err2 != nil {
return err2
}
} else if hardLinkDstPath, ok := copiedFiles[id]; ok {
if err2 := os.Link(hardLinkDstPath, dstPath); err2 != nil {
return err2
}
} else {
if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
return err2
}
copiedFiles[id] = dstPath
}

case os.ModeDir:
if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
return err
}

case os.ModeSymlink:
link, err := os.Readlink(srcPath)
if err != nil {
return err
}

if err := os.Symlink(link, dstPath); err != nil {
return err
}

case os.ModeNamedPipe:
fallthrough
case os.ModeSocket:
if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
return err
}

case os.ModeDevice:
if rsystem.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}
if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
return err
}

default:
return fmt.Errorf("unknown file type for %s", srcPath)
}

// Everything below is copying metadata from src to dst. All this metadata
// already shares an inode for hardlinks.
if isHardlink {
return nil
}

if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
return err
}

if copyXattrs {
if err := doCopyXattrs(srcPath, dstPath); err != nil {
return err
}
}

isSymlink := f.Mode()&os.ModeSymlink != 0

// There is no LChmod, so ignore mode for symlink. Also, this
// must happen after chown, as that can modify the file mode
if !isSymlink {
if err := os.Chmod(dstPath, f.Mode()); err != nil {
return err
}
}

// system.Chtimes doesn't support a NOFOLLOW flag atm
// nolint: unconvert
if f.IsDir() {
dirsToSetMtimes.PushFront(&dirMtimeInfo{dstPath: &dstPath, stat: stat})
} else if !isSymlink {
aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
return err
}
} else {
ts := []syscall.Timespec{stat.Atim, stat.Mtim}
if err := system.LUtimesNano(dstPath, ts); err != nil {
return err
}
}
return nil
})
if err != nil {
return err
}
for e := dirsToSetMtimes.Front(); e != nil; e = e.Next() {
mtimeInfo := e.Value.(*dirMtimeInfo)
ts := []syscall.Timespec{mtimeInfo.stat.Atim, mtimeInfo.stat.Mtim}
if err := system.LUtimesNano(*mtimeInfo.dstPath, ts); err != nil {
return err
}
}

return nil
}

func doCopyXattrs(srcPath, dstPath string) error {
if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
return err
}

// We need to copy this attribute if it appears in an overlay upper layer, as
// this function is used to copy those. It is set by overlay if a directory
// is removed and then re-created and should not inherit anything from the
// same dir in the lower dir.
return copyXattr(srcPath, dstPath, "trusted.overlay.opaque")
}
2
vendor/github.com/containers/storage/drivers/devmapper/deviceset.go
generated
vendored
@ -2401,7 +2401,7 @@ func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.Mo
addNouuid := strings.Contains("nouuid", mountOptions)
mountOptions = strings.Join(moptions.Options, ",")
if addNouuid {
mountOptions = fmt.Sprintf("nouuid,", mountOptions)
mountOptions = fmt.Sprintf("nouuid,%s", mountOptions)
}
}
5
vendor/github.com/containers/storage/drivers/fsdiff.go
generated
vendored
@ -8,6 +8,7 @@ import (
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ioutils"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)

@ -167,7 +168,9 @@ func (gdw *NaiveDiffDriver) ApplyDiff(id string, applyMappings *idtools.IDMappin
}
defer driver.Put(id)

options := &archive.TarOptions{}
options := &archive.TarOptions{
InUserNS: rsystem.RunningInUserNS(),
}
if applyMappings != nil {
options.UIDMaps = applyMappings.UIDs()
options.GIDMaps = applyMappings.GIDs()
2
vendor/github.com/containers/storage/drivers/overlay/overlay.go
generated
vendored
@ -29,6 +29,7 @@ import (
"github.com/containers/storage/pkg/parsers"
"github.com/containers/storage/pkg/system"
units "github.com/docker/go-units"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/opencontainers/selinux/go-selinux/label"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -876,6 +877,7 @@ func (d *Driver) ApplyDiff(id string, idMappings *idtools.IDMappings, parent str
UIDMaps: idMappings.UIDs(),
GIDMaps: idMappings.GIDs(),
WhiteoutFormat: d.getWhiteoutFormat(),
InUserNS: rsystem.RunningInUserNS(),
}); err != nil {
return 0, err
}
7
vendor/github.com/containers/storage/drivers/vfs/copy_linux.go
generated
vendored
Normal file
@ -0,0 +1,7 @@
package vfs

import "github.com/containers/storage/drivers/copy"

func dirCopy(srcDir, dstDir string) error {
return copy.DirCopy(srcDir, dstDir, copy.Content, false)
}
9
vendor/github.com/containers/storage/drivers/vfs/copy_unsupported.go
generated
vendored
Normal file
@ -0,0 +1,9 @@
// +build !linux

package vfs // import "github.com/containers/storage/drivers/vfs"

import "github.com/containers/storage/pkg/chrootarchive"

func dirCopy(srcDir, dstDir string) error {
return chrootarchive.NewArchiver(nil).CopyWithTar(srcDir, dstDir)
}
7
vendor/github.com/containers/storage/drivers/vfs/driver.go
generated
vendored
@ -7,7 +7,6 @@ import (
"strings"

"github.com/containers/storage/drivers"
"github.com/containers/storage/pkg/chrootarchive"
"github.com/containers/storage/pkg/idtools"
"github.com/containers/storage/pkg/ostree"
"github.com/containers/storage/pkg/system"
@ -15,8 +14,8 @@ import (
)

var (
// CopyWithTar defines the copy method to use.
CopyWithTar = chrootarchive.NewArchiver(nil).CopyWithTar
// CopyDir defines the copy method to use.
CopyDir = dirCopy
)

func init() {
@ -141,7 +140,7 @@ func (d *Driver) create(id, parent string, opts *graphdriver.CreateOpts, ro bool
if err != nil {
return fmt.Errorf("%s: %s", parent, err)
}
if err := CopyWithTar(parentDir, dir); err != nil {
if err := dirCopy(parentDir, dir); err != nil {
return err
}
}
5
vendor/github.com/containers/storage/layers.go
generated
vendored
@ -28,9 +28,8 @@ import (
)

const (
tarSplitSuffix = ".tar-split.gz"
incompleteFlag = "incomplete"
compressionFlag = "diff-compression"
tarSplitSuffix = ".tar-split.gz"
incompleteFlag = "incomplete"
)

// A Layer is a record of a copy-on-write layer that's stored by the lower
5
vendor/github.com/containers/storage/pkg/archive/archive.go
generated
vendored
@ -22,6 +22,7 @@ import (
"github.com/containers/storage/pkg/pools"
"github.com/containers/storage/pkg/promise"
"github.com/containers/storage/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
"github.com/sirupsen/logrus"
)

@ -1054,6 +1055,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
GIDMaps: tarMappings.GIDs(),
Compression: Uncompressed,
CopyPass: true,
InUserNS: rsystem.RunningInUserNS(),
}
archive, err := TarWithOptions(src, options)
if err != nil {
@ -1068,6 +1070,7 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@ -1087,6 +1090,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
UIDMaps: untarMappings.UIDs(),
GIDMaps: untarMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
InUserNS: rsystem.RunningInUserNS(),
}
return archiver.Untar(archive, dst, options)
}
@ -1186,6 +1190,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
UIDMaps: archiver.UntarIDMappings.UIDs(),
GIDMaps: archiver.UntarIDMappings.GIDs(),
ChownOpts: archiver.ChownOpts,
InUserNS: rsystem.RunningInUserNS(),
}
err = archiver.Untar(r, filepath.Dir(dst), options)
if err != nil {
2
vendor/github.com/containers/storage/pkg/chrootarchive/archive.go
generated
vendored
@ -9,6 +9,7 @@ import (

"github.com/containers/storage/pkg/archive"
"github.com/containers/storage/pkg/idtools"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)

// NewArchiver returns a new Archiver which uses chrootarchive.Untar
@ -52,6 +53,7 @@ func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions
}
if options == nil {
options = &archive.TarOptions{}
options.InUserNS = rsystem.RunningInUserNS()
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
4
vendor/github.com/containers/storage/vendor.conf
generated
vendored
@ -9,7 +9,7 @@ github.com/mistifyio/go-zfs c0224de804d438efd11ea6e52ada8014537d6062
github.com/opencontainers/go-digest master
github.com/opencontainers/runc 6c22e77604689db8725fa866f0f2ec0b3e8c3a07
github.com/opencontainers/selinux 36a9bc45a08c85f2c52bd9eb32e20267876773bd
github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460
github.com/ostreedev/ostree-go master
github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9
github.com/pkg/errors master
github.com/pmezard/go-difflib v1.0.0
@ -21,3 +21,5 @@ github.com/tchap/go-patricia v2.2.6
github.com/vbatts/tar-split v0.10.2
golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
gotest.tools master
github.com/google/go-cmp master
2
vendor/golang.org/x/crypto/openpgp/write.go
generated
vendored
@ -271,6 +271,7 @@ func Encrypt(ciphertext io.Writer, to []*Entity, signed *Entity, hints *FileHint
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
@ -349,6 +350,7 @@ func Sign(output io.Writer, signed *Entity, hints *FileHints, config *packet.Con
// These are the possible hash functions that we'll use for the signature.
candidateHashes := []uint8{
hashToHashId(crypto.SHA256),
hashToHashId(crypto.SHA384),
hashToHashId(crypto.SHA512),
hashToHashId(crypto.SHA1),
hashToHashId(crypto.RIPEMD160),
25
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@ -97,6 +97,16 @@ type Transport struct {
// to mean no limit.
MaxHeaderListSize uint32

// StrictMaxConcurrentStreams controls whether the server's
// SETTINGS_MAX_CONCURRENT_STREAMS should be respected
// globally. If false, new TCP connections are created to the
// server as needed to keep each under the per-connection
// SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the
// server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as
// a global limit and callers of RoundTrip block when needed,
// waiting for their turn.
StrictMaxConcurrentStreams bool

// t1, if non-nil, is the standard library Transport using
// this transport. Its settings are used (but not its
// RoundTrip method, etc).
@ -711,8 +721,19 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
if cc.singleUse && cc.nextStreamID > 1 {
return
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing &&
int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
var maxConcurrentOkay bool
if cc.t.StrictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before
// writing it.
maxConcurrentOkay = true
} else {
maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams)
}

st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}
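A short, hedged sketch of opting into the new behavior: the StrictMaxConcurrentStreams field is real per the diff above, while the surrounding client wiring is illustrative:

```go
package example

import (
	"crypto/tls"
	"net/http"

	"golang.org/x/net/http2"
)

// newStrictClient builds an HTTP/2 client that queues RoundTrip callers on
// the existing connection instead of dialing extra TCP connections once the
// server's SETTINGS_MAX_CONCURRENT_STREAMS limit is reached.
func newStrictClient() *http.Client {
	t := &http2.Transport{
		StrictMaxConcurrentStreams: true,
		TLSClientConfig:            &tls.Config{}, // illustrative; configure for your environment
	}
	return &http.Client{Transport: t}
}
```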
2
vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m32 /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build 386,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -m64 /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build amd64,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build arm,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build arm64,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build mips,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build mips64,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build mips64le,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build mipsle,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build ppc64,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build ppc64le,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build riscv64,linux

2
vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
generated
vendored
@ -1,4 +1,4 @@
// linux/mksysnum.pl -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h
// go run linux/mksysnum.go -Wall -Werror -static -I/tmp/include -fsigned-char /tmp/include/asm/unistd.h
// Code generated by the command above; see README.md. DO NOT EDIT.

// +build s390x,linux
25
vendor/k8s.io/client-go/README.md
generated
vendored
@ -2,7 +2,7 @@

Go clients for talking to a [kubernetes](http://kubernetes.io/) cluster.

We currently recommend using the v9.0.0 tag. See [INSTALL.md](/INSTALL.md) for
We currently recommend using the v10.0.0 tag. See [INSTALL.md](/INSTALL.md) for
detailed installation instructions. `go get k8s.io/client-go/...` works, but
will build `master`, which doesn't handle the dependencies well.

@ -91,16 +91,16 @@ We will backport bugfixes--but not new features--into older versions of

#### Compatibility matrix

| | Kubernetes 1.6 | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | Kubernetes 1.11 | Kubernetes 1.12 |
|---------------------|----------------|----------------|----------------|----------------|-----------------|-----------------|-----------------|
| client-go 3.0 | ✓ | - | +- | +- | +- | +- | +- |
| client-go 4.0 | +- | ✓ | +- | +- | +- | +- | +- |
| client-go 5.0 | +- | +- | ✓ | +- | +- | +- | +- |
| client-go 6.0 | +- | +- | +- | ✓ | +- | +- | +- |
| client-go 7.0 | +- | +- | +- | +- | ✓ | +- | +- |
| client-go 8.0 | +- | +- | +- | +- | +- | ✓ | +- |
| client-go 9.0 | +- | +- | +- | +- | +- | +- | ✓ |
| client-go HEAD | +- | +- | +- | +- | +- | +- | +- |
| | Kubernetes 1.7 | Kubernetes 1.8 | Kubernetes 1.9 | Kubernetes 1.10 | Kubernetes 1.11 | Kubernetes 1.12 | Kubernetes 1.13 |
|---------------------|----------------|----------------|----------------|-----------------|-----------------|-----------------|-----------------|
| client-go 4.0 | ✓ | +- | +- | +- | +- | +- | +- |
| client-go 5.0 | +- | ✓ | +- | +- | +- | +- | +- |
| client-go 6.0 | +- | +- | ✓ | +- | +- | +- | +- |
| client-go 7.0 | +- | +- | +- | ✓ | +- | +- | +- |
| client-go 8.0 | +- | +- | +- | +- | ✓ | +- | +- |
| client-go 9.0 | +- | +- | +- | +- | +- | ✓ | +- |
| client-go 10.0 | +- | +- | +- | +- | +- | +- | ✓ |
| client-go HEAD | +- | +- | +- | +- | +- | +- | +- |

Key:

@ -128,9 +128,10 @@ between client-go versions.
| client-go 4.0 | Kubernetes main repo, 1.7 branch | = - |
| client-go 5.0 | Kubernetes main repo, 1.8 branch | = - |
| client-go 6.0 | Kubernetes main repo, 1.9 branch | = - |
| client-go 7.0 | Kubernetes main repo, 1.10 branch | ✓ |
| client-go 7.0 | Kubernetes main repo, 1.10 branch | = - |
| client-go 8.0 | Kubernetes main repo, 1.11 branch | ✓ |
| client-go 9.0 | Kubernetes main repo, 1.12 branch | ✓ |
| client-go 10.0 | Kubernetes main repo, 1.13 branch | ✓ |
| client-go HEAD | Kubernetes main repo, master branch | ✓ |

Key: