diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index f93395b03ac..d9504d3f3ac 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -132,8 +132,8 @@ }, { "ImportPath": "github.com/boltdb/bolt", - "Comment": "v1.0-119-g90fef38", - "Rev": "90fef389f98027ca55594edd7dbd6e7f3926fdad" + "Comment": "v1.1.0-65-gee4a088", + "Rev": "ee4a0888a9abe7eefe5a0992ca4cb06864839873" }, { "ImportPath": "github.com/bradfitz/http2", @@ -824,8 +824,8 @@ }, { "ImportPath": "github.com/onsi/ginkgo", - "Comment": "v1.2.0-6-gd981d36", - "Rev": "d981d36e9884231afa909627b9c275e4ba678f90" + "Comment": "v1.2.0-42-g07d85e6", + "Rev": "07d85e6b10c4289c7d612f9b13f45ba36f66d55b" }, { "ImportPath": "github.com/onsi/gomega", diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile index cfbed514bbb..e035e63adcd 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/Makefile @@ -1,54 +1,18 @@ -TEST=. -BENCH=. -COVERPROFILE=/tmp/c.out BRANCH=`git rev-parse --abbrev-ref HEAD` COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" default: build -bench: - go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) - -# http://cloc.sourceforge.net/ -cloc: - @cloc --not-match-f='Makefile|_test.go' . - -cover: fmt - go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) . - go tool cover -html=$(COVERPROFILE) - rm $(COVERPROFILE) - -cpuprofile: fmt - @go test -c - @./bolt.test -test.v -test.run=$(TEST) -test.cpuprofile cpu.prof +race: + @go test -v -race -test.run="TestSimulate_(100op|1000op)" # go get github.com/kisielk/errcheck errcheck: - @echo "=== errcheck ===" - @errcheck github.com/boltdb/bolt + @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt -fmt: - @go fmt ./... +test: + @go test -v -cover . + @go test -v ./cmd/bolt -get: - @go get -d ./... 
- -build: get - @mkdir -p bin - @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt - -test: fmt - @go get github.com/stretchr/testify/assert - @echo "=== TESTS ===" - @go test -v -cover -test.run=$(TEST) - @echo "" - @echo "" - @echo "=== CLI ===" - @go test -v -test.run=$(TEST) ./cmd/bolt - @echo "" - @echo "" - @echo "=== RACE DETECTOR ===" - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -.PHONY: bench cloc cover cpuprofile fmt memprofile test +.PHONY: fmt test diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md index 00fad6afb8b..82e85742c04 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/README.md +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/README.md @@ -1,8 +1,8 @@ -Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.png?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.png)](https://godoc.org/github.com/boltdb/bolt) ![Version](http://img.shields.io/badge/version-1.0-green.png) +Bolt [![Build Status](https://drone.io/github.com/boltdb/bolt/status.png)](https://drone.io/github.com/boltdb/bolt/latest) [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg) ==== -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] and -the [LMDB project][lmdb]. The goal of the project is to provide a simple, +Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] +[LMDB project][lmdb]. The goal of the project is to provide a simple, fast, and reliable database for projects that don't require a full database server such as Postgres or MySQL. @@ -13,7 +13,6 @@ and setting values. That's it. [hyc_symas]: https://twitter.com/hyc_symas [lmdb]: http://symas.com/mdb/ - ## Project Status Bolt is stable and the API is fixed. Full unit test coverage and randomized @@ -22,6 +21,36 @@ Bolt is currently in high-load production environments serving databases as large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed services every day. 
+## Table of Contents + +- [Getting Started](#getting-started) + - [Installing](#installing) + - [Opening a database](#opening-a-database) + - [Transactions](#transactions) + - [Read-write transactions](#read-write-transactions) + - [Read-only transactions](#read-only-transactions) + - [Batch read-write transactions](#batch-read-write-transactions) + - [Managing transactions manually](#managing-transactions-manually) + - [Using buckets](#using-buckets) + - [Using key/value pairs](#using-keyvalue-pairs) + - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) + - [Iterating over keys](#iterating-over-keys) + - [Prefix scans](#prefix-scans) + - [Range scans](#range-scans) + - [ForEach()](#foreach) + - [Nested buckets](#nested-buckets) + - [Database backups](#database-backups) + - [Statistics](#statistics) + - [Read-Only Mode](#read-only-mode) + - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) +- [Resources](#resources) +- [Comparison with other databases](#comparison-with-other-databases) + - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) + - [LevelDB, RocksDB](#leveldb-rocksdb) + - [LMDB](#lmdb) +- [Caveats & Limitations](#caveats--limitations) +- [Reading the Source](#reading-the-source) +- [Other Projects Using Bolt](#other-projects-using-bolt) ## Getting Started @@ -180,8 +209,8 @@ and then safely close your transaction if an error is returned. This is the recommended way to use Bolt transactions. However, sometimes you may want to manually start and end your transactions. -You can use the `Tx.Begin()` function directly but _please_ be sure to close the -transaction. +You can use the `Tx.Begin()` function directly but **please** be sure to close +the transaction. ```go // Start a writable transaction. @@ -256,7 +285,7 @@ db.View(func(tx *bolt.Tx) error { ``` The `Get()` function does not return an error because its operation is -guarenteed to work (unless there is some kind of system failure). If the key +guaranteed to work (unless there is some kind of system failure). If the key exists then it will return its byte slice value. If it doesn't exist then it will return `nil`. It's important to note that you can have a zero-length value set to a key which is different than the key not existing. @@ -268,6 +297,49 @@ transaction is open. If you need to use a value outside of the transaction then you must use `copy()` to copy it to another byte slice. +### Autoincrementing integer for the bucket +By using the `NextSequence()` function, you can let Bolt determine a sequence +which can be used as the unique identifier for your key/value pairs. See the +example below. + +```go +// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. +func (s *Store) CreateUser(u *User) error { + return s.db.Update(func(tx *bolt.Tx) error { + // Retrieve the users bucket. + // This should be created when the DB is first opened. + b := tx.Bucket([]byte("users")) + + // Generate ID for the user. + // This returns an error only if the Tx is closed or not writeable. + // That can't happen in an Update() call so I ignore the error check. + id, _ = b.NextSequence() + u.ID = int(id) + + // Marshal user data into bytes. + buf, err := json.Marshal(u) + if err != nil { + return err + } + + // Persist bytes to users bucket. + return b.Put(itob(u.ID), buf) + }) +} + +// itob returns an 8-byte big endian representation of v. 
+func itob(v int) []byte { + b := make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + return b +} + +type User struct { + ID int + ... +} +``` + ### Iterating over keys Bolt stores its keys in byte-sorted order within a bucket. This makes sequential @@ -276,7 +348,9 @@ iteration over these keys extremely fast. To iterate over keys we'll use a ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys b := tx.Bucket([]byte("MyBucket")) + c := b.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { @@ -300,10 +374,15 @@ Next() Move to the next key. Prev() Move to the previous key. ``` -When you have iterated to the end of the cursor then `Next()` will return `nil`. -You must seek to a position using `First()`, `Last()`, or `Seek()` before -calling `Next()` or `Prev()`. If you do not seek to a position then these -functions will return `nil`. +Each of those functions has a return signature of `(key []byte, value []byte)`. +When you have iterated to the end of the cursor then `Next()` will return a +`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` +before calling `Next()` or `Prev()`. If you do not seek to a position then +these functions will return a `nil` key. + +During iteration, if the key is non-`nil` but the value is `nil`, that means +the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to +access the sub-bucket. #### Prefix scans @@ -312,6 +391,7 @@ To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys c := tx.Bucket([]byte("MyBucket")).Cursor() prefix := []byte("1234") @@ -331,7 +411,7 @@ date range like this: ```go db.View(func(tx *bolt.Tx) error { - // Assume our events bucket has RFC3339 encoded time keys. + // Assume our events bucket exists and has RFC3339 encoded time keys. c := tx.Bucket([]byte("Events")).Cursor() // Our time range spans the 90's decade. @@ -355,7 +435,9 @@ all the keys in a bucket: ```go db.View(func(tx *bolt.Tx) error { + // Assume bucket exists and has keys b := tx.Bucket([]byte("MyBucket")) + b.ForEach(func(k, v []byte) error { fmt.Printf("key=%s, value=%s\n", k, v) return nil @@ -382,8 +464,11 @@ func (*Bucket) DeleteBucket(key []byte) error Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` function to write a consistent view of the database to a writer. If you call this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. It will also use `O_DIRECT` when available -to prevent page cache trashing. +your other database reads and writes. + +By default, it will use a regular file handle which will utilize the operating +system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) +documentation for information about optimizing for larger-than-RAM datasets. One common use case is to backup over HTTP so you can use tools like `cURL` to do database backups: @@ -465,6 +550,84 @@ if err != nil { } ``` +### Mobile Use (iOS/Android) + +Bolt is able to run on mobile devices by leveraging the binding feature of the +[gomobile](https://github.com/golang/mobile) tool. Create a struct that will +contain your database logic and a reference to a `*bolt.DB` with a initializing +contstructor that takes in a filepath where the database file will be stored. +Neither Android nor iOS require extra permissions or cleanup from using this method. 
+ +```go +func NewBoltDB(filepath string) *BoltDB { + db, err := bolt.Open(filepath+"/demo.db", 0600, nil) + if err != nil { + log.Fatal(err) + } + + return &BoltDB{db} +} + +type BoltDB struct { + db *bolt.DB + ... +} + +func (b *BoltDB) Path() string { + return b.db.Path() +} + +func (b *BoltDB) Close() { + b.db.Close() +} +``` + +Database logic should be defined as methods on this wrapper struct. + +To initialize this struct from the native language (both platforms now sync +their local storage to the cloud. These snippets disable that functionality for the +database file): + +#### Android + +```java +String path; +if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){ + path = getNoBackupFilesDir().getAbsolutePath(); +} else{ + path = getFilesDir().getAbsolutePath(); +} +Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path) +``` + +#### iOS + +```objc +- (void)demo { + NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, + NSUserDomainMask, + YES) objectAtIndex:0]; + GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); + [self addSkipBackupAttributeToItemAtPath:demo.path]; + //Some DB Logic would go here + [demo close]; +} + +- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString +{ + NSURL* URL= [NSURL fileURLWithPath: filePathString]; + assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); + + NSError *error = nil; + BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] + forKey: NSURLIsExcludedFromBackupKey error: &error]; + if(!success){ + NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); + } + return success; +} + +``` ## Resources @@ -500,7 +663,7 @@ they are libraries bundled into the application, however, their underlying structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes random writes by using a write ahead log and multi-tiered, sorted files called SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade offs. +have trade-offs. If you require a high random write throughput (>10,000 w/sec) or you need to use spinning disks then LevelDB could be a good choice. If your application is @@ -536,9 +699,8 @@ It's important to pick the right tool for the job and Bolt is no exception. Here are a few things to note when evaluating and using Bolt: * Bolt is good for read intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can add a write-ahead log or - [transaction coalescer](https://github.com/boltdb/coalescer) in front of Bolt - to mitigate this issue. + also fast but random writes can be slow. You can use `DB.Batch()` or add a + write-ahead log to help mitigate this issue. * Bolt uses a B+tree internally so there can be a lot of random page access. SSDs provide a significant performance boost over spinning disks. @@ -568,11 +730,13 @@ Here are a few things to note when evaluating and using Bolt: can in memory and will release memory as needed to other processes. This means that Bolt can show very high memory usage when working with large databases. However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM. + handle databases much larger than the available physical RAM, provided its + memory-map fits in the process virtual address space. It may be problematic + on 32-bits systems. 
* The data structures in the Bolt database are memory mapped so the data file will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most + little endian machine to a big endian machine and have it work. For most users this is not a concern since most modern CPUs are little endian. * Because of the way pages are laid out on disk, Bolt cannot truncate data files @@ -587,6 +751,56 @@ Here are a few things to note when evaluating and using Bolt: [page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 +## Reading the Source + +Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, +transactional key/value database so it can be a good starting point for people +interested in how databases work. + +The best places to start are the main entry points into Bolt: + +- `Open()` - Initializes the reference to the database. It's responsible for + creating the database if it doesn't exist, obtaining an exclusive lock on the + file, reading the meta pages, & memory-mapping the file. + +- `DB.Begin()` - Starts a read-only or read-write transaction depending on the + value of the `writable` argument. This requires briefly obtaining the "meta" + lock to keep track of open transactions. Only one read-write transaction can + exist at a time so the "rwlock" is acquired during the life of a read-write + transaction. + +- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the + arguments, a cursor is used to traverse the B+tree to the page and position + where they key & value will be written. Once the position is found, the bucket + materializes the underlying page and the page's parent pages into memory as + "nodes". These nodes are where mutations occur during read-write transactions. + These changes get flushed to disk during commit. + +- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor + to move to the page & position of a key/value pair. During a read-only + transaction, the key and value data is returned as a direct reference to the + underlying mmap file so there's no allocation overhead. For read-write + transactions, this data may reference the mmap file or one of the in-memory + node values. + +- `Cursor` - This object is simply for traversing the B+tree of on-disk pages + or in-memory nodes. It can seek to a specific key, move to the first or last + value, or it can move forward or backward. The cursor handles the movement up + and down the B+tree transparently to the end user. + +- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages + into pages to be written to disk. Writing to disk then occurs in two phases. + First, the dirty pages are written to disk and an `fsync()` occurs. Second, a + new meta page with an incremented transaction ID is written and another + `fsync()` occurs. This two phase write ensures that partially written data + pages are ignored in the event of a crash since the meta page pointing to them + is never written. Partially written meta pages are invalidated because they + are written with a checksum. + +If you have additional notes that could be helpful for others, please submit +them via pull request. 
+ + ## Other Projects Using Bolt Below is a list of public, open source projects that use Bolt: @@ -597,25 +811,30 @@ Below is a list of public, open source projects that use Bolt: * [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. * [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. * [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/nulayer/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. +* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. * [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. * [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". * [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. * [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. * [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](http://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. +* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. * [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. * [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. * [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. * [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. * [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. * [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database. -* [Seaweed File System](https://github.com/chrislusf/weed-fs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](http://influxdb.com) - Scalable datastore for metrics, events, and real-time analytics. +* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. +* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. * [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. * [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. * [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistant, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. +* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. 
It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. * [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. +* [stow](https://github.com/djherbis/stow) - a persistence manager for objects + backed by boltdb. +* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining + simple tx and key scans. +* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to [RequestBin](http://requestb.in/) service If you are using Bolt in a project please send a pull request to add it to the list. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go b/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go deleted file mode 100644 index 84acae6bbf0..00000000000 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/batch.go +++ /dev/null @@ -1,138 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "sync" - "time" -) - -// Batch calls fn as part of a batch. It behaves similar to Update, -// except: -// -// 1. concurrent Batch calls can be combined into a single Bolt -// transaction. -// -// 2. the function passed to Batch may be called multiple times, -// regardless of whether it returns error or not. -// -// This means that Batch function side effects must be idempotent and -// take permanent effect only after a successful return is seen in -// caller. -// -// The maximum batch size and delay can be adjusted with DB.MaxBatchSize -// and DB.MaxBatchDelay, respectively. -// -// Batch is only useful when there are multiple goroutines calling it. -func (db *DB) Batch(fn func(*Tx) error) error { - errCh := make(chan error, 1) - - db.batchMu.Lock() - if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { - // There is no existing batch, or the existing batch is full; start a new one. - db.batch = &batch{ - db: db, - } - db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) - } - db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) - if len(db.batch.calls) >= db.MaxBatchSize { - // wake up batch, it's ready to run - go db.batch.trigger() - } - db.batchMu.Unlock() - - err := <-errCh - if err == trySolo { - err = db.Update(fn) - } - return err -} - -type call struct { - fn func(*Tx) error - err chan<- error -} - -type batch struct { - db *DB - timer *time.Timer - start sync.Once - calls []call -} - -// trigger runs the batch if it hasn't already been run. -func (b *batch) trigger() { - b.start.Do(b.run) -} - -// run performs the transactions in the batch and communicates results -// back to DB.Batch. -func (b *batch) run() { - b.db.batchMu.Lock() - b.timer.Stop() - // Make sure no new work is added to this batch, but don't break - // other batches. - if b.db.batch == b { - b.db.batch = nil - } - b.db.batchMu.Unlock() - -retry: - for len(b.calls) > 0 { - var failIdx = -1 - err := b.db.Update(func(tx *Tx) error { - for i, c := range b.calls { - if err := safelyCall(c.fn, tx); err != nil { - failIdx = i - return err - } - } - return nil - }) - - if failIdx >= 0 { - // take the failing transaction out of the batch. it's - // safe to shorten b.calls here because db.batch no longer - // points to us, and we hold the mutex anyway. 
- c := b.calls[failIdx] - b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] - // tell the submitter re-run it solo, continue with the rest of the batch - c.err <- trySolo - continue retry - } - - // pass success, or bolt internal errors, to all callers - for _, c := range b.calls { - if c.err != nil { - c.err <- err - } - } - break retry - } -} - -// trySolo is a special sentinel error value used for signaling that a -// transaction function should be re-run. It should never be seen by -// callers. -var trySolo = errors.New("batch function returned an error and should be re-run solo") - -type panicked struct { - reason interface{} -} - -func (p panicked) Error() string { - if err, ok := p.reason.(error); ok { - return err.Error() - } - return fmt.Sprintf("panic: %v", p.reason) -} - -func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { - defer func() { - if p := recover(); p != nil { - err = panicked{p} - } - }() - return fn(tx) -} diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go new file mode 100644 index 00000000000..6d2309352e0 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_arm64.go @@ -0,0 +1,9 @@ +// +build arm64 + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go index e9d1c907b63..2b676661409 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_linux.go @@ -4,8 +4,6 @@ import ( "syscall" ) -var odirect = syscall.O_DIRECT - // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { return syscall.Fdatasync(int(db.file.Fd())) diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go index 7c1bef1a4f4..7058c3d734e 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_openbsd.go @@ -11,8 +11,6 @@ const ( msInvalidate // invalidate cached data ) -var odirect int - func msync(db *DB) error { _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) if errno != 0 { diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go new file mode 100644 index 00000000000..8351e129f6a --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_ppc64le.go @@ -0,0 +1,9 @@ +// +build ppc64le + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. +const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go new file mode 100644 index 00000000000..f4dd26bbba7 --- /dev/null +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_s390x.go @@ -0,0 +1,9 @@ +// +build s390x + +package bolt + +// maxMapSize represents the largest mmap size supported by Bolt. 
+const maxMapSize = 0xFFFFFFFFFFFF // 256TB + +// maxAllocSize is the size used when creating array pointers. +const maxAllocSize = 0x7FFFFFFF diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go index 6eef6b22035..4b0723aac23 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix.go @@ -46,19 +46,8 @@ func funlock(f *os.File) error { // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) + b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) if err != nil { return err } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go index f480ee76d1f..1c4e48d63a0 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_unix_solaris.go @@ -1,4 +1,3 @@ - package bolt import ( @@ -7,6 +6,7 @@ import ( "syscall" "time" "unsafe" + "golang.org/x/sys/unix" ) @@ -56,19 +56,8 @@ func funlock(f *os.File) error { // mmap memory maps a DB's data file. func mmap(db *DB, sz int) error { - // Truncate and fsync to ensure file size metadata is flushed. - // https://github.com/boltdb/bolt/issues/284 - if !db.NoGrowSync && !db.readOnly { - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("file resize error: %s", err) - } - if err := db.file.Sync(); err != nil { - return fmt.Errorf("file sync error: %s", err) - } - } - // Map the data file to memory. 
- b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED) + b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) if err != nil { return err } diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go index 8b782be5f9e..91c4968f6a1 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bolt_windows.go @@ -8,7 +8,37 @@ import ( "unsafe" ) -var odirect int +// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") +) + +const ( + // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx + flagLockExclusive = 2 + flagLockFailImmediately = 1 + + // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx + errLockViolation syscall.Errno = 0x21 +) + +func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) + if r == 0 { + return err + } + return nil +} + +func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { + r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) + if r == 0 { + return err + } + return nil +} // fdatasync flushes written data to a file descriptor. func fdatasync(db *DB) error { @@ -16,13 +46,37 @@ func fdatasync(db *DB) error { } // flock acquires an advisory lock on a file descriptor. -func flock(f *os.File, _ bool, _ time.Duration) error { - return nil +func flock(f *os.File, exclusive bool, timeout time.Duration) error { + var t time.Time + for { + // If we're beyond our timeout then return an error. + // This can only occur after we've attempted a flock once. + if t.IsZero() { + t = time.Now() + } else if timeout > 0 && time.Since(t) > timeout { + return ErrTimeout + } + + var flag uint32 = flagLockFailImmediately + if exclusive { + flag |= flagLockExclusive + } + + err := lockFileEx(syscall.Handle(f.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) + if err == nil { + return nil + } else if err != errLockViolation { + return err + } + + // Wait for a bit and try again. + time.Sleep(50 * time.Millisecond) + } } // funlock releases an advisory lock on a file descriptor. func funlock(f *os.File) error { - return nil + return unlockFileEx(syscall.Handle(f.Fd()), 0, 1, 0, &syscall.Overlapped{}) } // mmap memory maps a DB's data file. diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go index 8db89776fe6..f50442523c3 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/boltsync_unix.go @@ -2,8 +2,6 @@ package bolt -var odirect int - // fdatasync flushes written data to a file descriptor. 
func fdatasync(db *DB) error { return db.file.Sync() diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go index 6766992100f..d2f8c524e42 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/bucket.go @@ -11,7 +11,7 @@ const ( MaxKeySize = 32768 // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = 4294967295 + MaxValueSize = (1 << 31) - 2 ) const ( @@ -99,6 +99,7 @@ func (b *Bucket) Cursor() *Cursor { // Bucket retrieves a nested bucket by name. // Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) Bucket(name []byte) *Bucket { if b.buckets != nil { if child := b.buckets[string(name)]; child != nil { @@ -148,6 +149,7 @@ func (b *Bucket) openBucket(value []byte) *Bucket { // CreateBucket creates a new bucket at the given key and returns the new bucket. // Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { if b.tx.db == nil { return nil, ErrTxClosed @@ -192,6 +194,7 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { // CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it. // Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) { child, err := b.CreateBucket(key) if err == ErrBucketExists { @@ -270,6 +273,7 @@ func (b *Bucket) Get(key []byte) []byte { // Put sets the value for a key in the bucket. // If the key exist then its previous value will be overwritten. +// Supplied value must remain valid for the life of the transaction. // Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large. func (b *Bucket) Put(key []byte, value []byte) error { if b.tx.db == nil { @@ -346,7 +350,8 @@ func (b *Bucket) NextSequence() (uint64, error) { // ForEach executes a function for each key/value pair in a bucket. // If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. +// the error is returned to the caller. The provided function must not modify +// the bucket; this will result in undefined behavior. 
func (b *Bucket) ForEach(fn func(k, v []byte) error) error { if b.tx.db == nil { return ErrTxClosed diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go index c41ebe404d9..b96e6f73511 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cmd/bolt/main.go @@ -825,7 +825,10 @@ func (cmd *StatsCommand) Run(args ...string) error { fmt.Fprintln(cmd.Stdout, "Bucket statistics") fmt.Fprintf(cmd.Stdout, "\tTotal number of buckets: %d\n", s.BucketN) - percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) + percentage = 0 + if s.BucketN != 0 { + percentage = int(float32(s.InlineBucketN) * 100.0 / float32(s.BucketN)) + } fmt.Fprintf(cmd.Stdout, "\tTotal number on inlined buckets: %d (%d%%)\n", s.InlineBucketN, percentage) percentage = 0 if s.LeafInuse != 0 { diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go index 006c54889e8..1be9f35e3ef 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/cursor.go @@ -34,6 +34,13 @@ func (c *Cursor) First() (key []byte, value []byte) { p, n := c.bucket.pageNode(c.bucket.root) c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) c.first() + + // If we land on an empty page then move to the next value. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + c.next() + } + k, v, flags := c.keyValue() if (flags & uint32(bucketLeafFlag)) != 0 { return k, nil @@ -209,28 +216,37 @@ func (c *Cursor) last() { // next moves to the next leaf element and returns the key and value. // If the cursor is at the last leaf element then it stays there and returns nil. func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break + for { + // Attempt to move over one element until we're successful. + // Move up the stack as we hit the end of each page in our stack. + var i int + for i = len(c.stack) - 1; i >= 0; i-- { + elem := &c.stack[i] + if elem.index < elem.count()-1 { + elem.index++ + break + } } - } - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } + // If we've hit the root page then stop and return. This will leave the + // cursor on the last element of the last page. + if i == -1 { + return nil, nil, 0 + } - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. - c.stack = c.stack[:i+1] - c.first() - return c.keyValue() + // Otherwise start from where we left off in the stack and find the + // first element of the first leaf page. + c.stack = c.stack[:i+1] + c.first() + + // If this is an empty page then restart and move back up the stack. + // https://github.com/boltdb/bolt/issues/450 + if c.stack[len(c.stack)-1].count() == 0 { + continue + } + + return c.keyValue() + } } // search recursively performs a binary search against a given page/node until it finds a given key. 
diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go index d39c4aa9cce..0f1e1bc3d74 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/db.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/db.go @@ -1,8 +1,10 @@ package bolt import ( + "errors" "fmt" "hash/fnv" + "log" "os" "runtime" "runtime/debug" @@ -24,13 +26,14 @@ const magic uint32 = 0xED0CDAED // IgnoreNoSync specifies whether the NoSync field of a DB is ignored when // syncing changes to a file. This is required as some operating systems, // such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronzied using the msync(2) syscall. +// must be synchronized using the msync(2) syscall. const IgnoreNoSync = runtime.GOOS == "openbsd" // Default values if not set in a DB instance. const ( DefaultMaxBatchSize int = 1000 DefaultMaxBatchDelay = 10 * time.Millisecond + DefaultAllocSize = 16 * 1024 * 1024 ) // DB represents a collection of buckets persisted to a file on disk. @@ -63,6 +66,10 @@ type DB struct { // https://github.com/boltdb/bolt/issues/284 NoGrowSync bool + // If you want to read the entire database fast, you can set MmapFlag to + // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. + MmapFlags int + // MaxBatchSize is the maximum size of a batch. Default value is // copied from DefaultMaxBatchSize in Open. // @@ -79,11 +86,17 @@ type DB struct { // Do not change concurrently with calls to Batch. MaxBatchDelay time.Duration + // AllocSize is the amount of space allocated when the database + // needs to create new pages. This is done to amortize the cost + // of truncate() and fsync() when growing the data file. + AllocSize int + path string file *os.File dataref []byte // mmap'ed readonly, write throws SEGV data *[maxMapSize]byte datasz int + filesz int // current on disk file size meta0 *meta meta1 *meta pageSize int @@ -136,10 +149,12 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { options = DefaultOptions } db.NoGrowSync = options.NoGrowSync + db.MmapFlags = options.MmapFlags // Set default values for later DB operations. db.MaxBatchSize = DefaultMaxBatchSize db.MaxBatchDelay = DefaultMaxBatchDelay + db.AllocSize = DefaultAllocSize flag := os.O_RDWR if options.ReadOnly { @@ -172,7 +187,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { // Initialize the database if it doesn't exist. if info, err := db.file.Stat(); err != nil { - return nil, fmt.Errorf("stat error: %s", err) + return nil, err } else if info.Size() == 0 { // Initialize new files with meta pages. if err := db.init(); err != nil { @@ -184,14 +199,14 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) { if _, err := db.file.ReadAt(buf[:], 0); err == nil { m := db.pageInBuffer(buf[:], 0).meta() if err := m.validate(); err != nil { - return nil, fmt.Errorf("meta0 error: %s", err) + return nil, err } db.pageSize = int(m.pageSize) } } // Memory map the data file. - if err := db.mmap(0); err != nil { + if err := db.mmap(options.InitialMmapSize); err != nil { _ = db.close() return nil, err } @@ -248,10 +263,10 @@ func (db *DB) mmap(minsz int) error { // Validate the meta pages. 
if err := db.meta0.validate(); err != nil { - return fmt.Errorf("meta0 error: %s", err) + return err } if err := db.meta1.validate(); err != nil { - return fmt.Errorf("meta1 error: %s", err) + return err } return nil @@ -266,7 +281,7 @@ func (db *DB) munmap() error { } // mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 1MB and doubles until it reaches 1GB. +// of the database. The minimum size is 32KB and doubles until it reaches 1GB. // Returns an error if the new mmap size is greater than the max allowed. func (db *DB) mmapSize(size int) (int, error) { // Double the size from 32KB until 1GB. @@ -382,7 +397,9 @@ func (db *DB) close() error { // No need to unlock read-only file. if !db.readOnly { // Unlock the file. - _ = funlock(db.file) + if err := funlock(db.file); err != nil { + log.Printf("bolt.Close(): funlock error: %s", err) + } } // Close the file descriptor. @@ -401,11 +418,15 @@ func (db *DB) close() error { // will cause the calls to block and be serialized until the current write // transaction finishes. // -// Transactions should not be depedent on one another. Opening a read +// Transactions should not be dependent on one another. Opening a read // transaction and a write transaction in the same goroutine can cause the // writer to deadlock because the database periodically needs to re-mmap itself // as it grows and it cannot do that while a read transaction is open. // +// If a long running read transaction (for example, a snapshot transaction) is +// needed, you might want to set DB.InitialMmapSize to a large enough value +// to avoid potential blocking of write transaction. +// // IMPORTANT: You must close read-only transactions after you are finished or // else the database will not reclaim old pages. func (db *DB) Begin(writable bool) (*Tx, error) { @@ -589,6 +610,136 @@ func (db *DB) View(fn func(*Tx) error) error { return nil } +// Batch calls fn as part of a batch. It behaves similar to Update, +// except: +// +// 1. concurrent Batch calls can be combined into a single Bolt +// transaction. +// +// 2. the function passed to Batch may be called multiple times, +// regardless of whether it returns error or not. +// +// This means that Batch function side effects must be idempotent and +// take permanent effect only after a successful return is seen in +// caller. +// +// The maximum batch size and delay can be adjusted with DB.MaxBatchSize +// and DB.MaxBatchDelay, respectively. +// +// Batch is only useful when there are multiple goroutines calling it. +func (db *DB) Batch(fn func(*Tx) error) error { + errCh := make(chan error, 1) + + db.batchMu.Lock() + if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) { + // There is no existing batch, or the existing batch is full; start a new one. + db.batch = &batch{ + db: db, + } + db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger) + } + db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh}) + if len(db.batch.calls) >= db.MaxBatchSize { + // wake up batch, it's ready to run + go db.batch.trigger() + } + db.batchMu.Unlock() + + err := <-errCh + if err == trySolo { + err = db.Update(fn) + } + return err +} + +type call struct { + fn func(*Tx) error + err chan<- error +} + +type batch struct { + db *DB + timer *time.Timer + start sync.Once + calls []call +} + +// trigger runs the batch if it hasn't already been run. 
+func (b *batch) trigger() { + b.start.Do(b.run) +} + +// run performs the transactions in the batch and communicates results +// back to DB.Batch. +func (b *batch) run() { + b.db.batchMu.Lock() + b.timer.Stop() + // Make sure no new work is added to this batch, but don't break + // other batches. + if b.db.batch == b { + b.db.batch = nil + } + b.db.batchMu.Unlock() + +retry: + for len(b.calls) > 0 { + var failIdx = -1 + err := b.db.Update(func(tx *Tx) error { + for i, c := range b.calls { + if err := safelyCall(c.fn, tx); err != nil { + failIdx = i + return err + } + } + return nil + }) + + if failIdx >= 0 { + // take the failing transaction out of the batch. it's + // safe to shorten b.calls here because db.batch no longer + // points to us, and we hold the mutex anyway. + c := b.calls[failIdx] + b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1] + // tell the submitter re-run it solo, continue with the rest of the batch + c.err <- trySolo + continue retry + } + + // pass success, or bolt internal errors, to all callers + for _, c := range b.calls { + if c.err != nil { + c.err <- err + } + } + break retry + } +} + +// trySolo is a special sentinel error value used for signaling that a +// transaction function should be re-run. It should never be seen by +// callers. +var trySolo = errors.New("batch function returned an error and should be re-run solo") + +type panicked struct { + reason interface{} +} + +func (p panicked) Error() string { + if err, ok := p.reason.(error); ok { + return err.Error() + } + return fmt.Sprintf("panic: %v", p.reason) +} + +func safelyCall(fn func(*Tx) error, tx *Tx) (err error) { + defer func() { + if p := recover(); p != nil { + err = panicked{p} + } + }() + return fn(tx) +} + // Sync executes fdatasync() against the database file handle. // // This is not necessary under normal operation, however, if you use NoSync @@ -655,6 +806,36 @@ func (db *DB) allocate(count int) (*page, error) { return p, nil } +// grow grows the size of the database to the given sz. +func (db *DB) grow(sz int) error { + // Ignore if the new size is less than available file size. + if sz <= db.filesz { + return nil + } + + // If the data is smaller than the alloc size then only allocate what's needed. + // Once it goes over the allocation size then allocate in chunks. + if db.datasz < db.AllocSize { + sz = db.datasz + } else { + sz += db.AllocSize + } + + // Truncate and fsync to ensure file size metadata is flushed. + // https://github.com/boltdb/bolt/issues/284 + if !db.NoGrowSync && !db.readOnly { + if err := db.file.Truncate(int64(sz)); err != nil { + return fmt.Errorf("file resize error: %s", err) + } + if err := db.file.Sync(); err != nil { + return fmt.Errorf("file sync error: %s", err) + } + } + + db.filesz = sz + return nil +} + func (db *DB) IsReadOnly() bool { return db.readOnly } @@ -672,6 +853,19 @@ type Options struct { // Open database in read-only mode. Uses flock(..., LOCK_SH |LOCK_NB) to // grab a shared lock (UNIX). ReadOnly bool + + // Sets the DB.MmapFlags flag before memory mapping the file. + MmapFlags int + + // InitialMmapSize is the initial mmap size of the database + // in bytes. Read transactions won't block write transaction + // if the InitialMmapSize is large enough to hold database mmap + // size. (See DB.Begin for more information) + // + // If <=0, the initial map size is 0. + // If initialMmapSize is smaller than the previous database size, + // it takes no effect. 
+ InitialMmapSize int } // DefaultOptions represent the options used if nil options are passed into Open(). diff --git a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go index 6b52b2c8964..e74d2cae760 100644 --- a/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go +++ b/Godeps/_workspace/src/github.com/boltdb/bolt/tx.go @@ -29,6 +29,14 @@ type Tx struct { pages map[pgid]*page stats TxStats commitHandlers []func() + + // WriteFlag specifies the flag for write-related methods like WriteTo(). + // Tx opens the database file with the specified flag to copy the data. + // + // By default, the flag is unset, which works well for mostly in-memory + // workloads. For databases that are much larger than available RAM, + // set the flag to syscall.O_DIRECT to avoid trashing the page cache. + WriteFlag int } // init initializes the transaction. @@ -87,18 +95,21 @@ func (tx *Tx) Stats() TxStats { // Bucket retrieves a bucket by name. // Returns nil if the bucket does not exist. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) Bucket(name []byte) *Bucket { return tx.root.Bucket(name) } // CreateBucket creates a new bucket. // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { return tx.root.CreateBucket(name) } // CreateBucketIfNotExists creates a new bucket if it doesn't already exist. // Returns an error if the bucket name is blank, or if the bucket name is too long. +// The bucket instance is only valid for the lifetime of the transaction. func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { return tx.root.CreateBucketIfNotExists(name) } @@ -157,6 +168,8 @@ func (tx *Tx) Commit() error { // Free the old root bucket. tx.meta.root.root = tx.root.root + opgid := tx.meta.pgid + // Free the freelist and allocate new pages for it. This will overestimate // the size of the freelist but not underestimate the size (which would be bad). tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) @@ -171,6 +184,14 @@ func (tx *Tx) Commit() error { } tx.meta.freelist = p.id + // If the high water mark has moved up then attempt to grow the database. + if tx.meta.pgid > opgid { + if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { + tx.rollback() + return err + } + } + // Write dirty pages to disk. startTime = time.Now() if err := tx.write(); err != nil { @@ -236,7 +257,8 @@ func (tx *Tx) close() { var freelistPendingN = tx.db.freelist.pending_count() var freelistAlloc = tx.db.freelist.size() - // Remove writer lock. + // Remove transaction ref & writer lock. + tx.db.rwtx = nil tx.db.rwlock.Unlock() // Merge statistics. @@ -250,11 +272,16 @@ func (tx *Tx) close() { } else { tx.db.removeTx(tx) } + + // Clear all references. tx.db = nil + tx.meta = nil + tx.root = Bucket{tx: tx} + tx.pages = nil } // Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() in +// This function exists for backwards compatibility. Use WriteTo() instead. func (tx *Tx) Copy(w io.Writer) error { _, err := tx.WriteTo(w) return err @@ -263,21 +290,18 @@ func (tx *Tx) Copy(w io.Writer) error { // WriteTo writes the entire database to a writer. // If err == nil then exactly tx.Size() bytes will be written into the writer. 
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader directly. - var f *os.File - if f, err = os.OpenFile(tx.db.path, os.O_RDONLY|odirect, 0); err != nil { - // Fallback to a regular open if that doesn't work. - if f, err = os.OpenFile(tx.db.path, os.O_RDONLY, 0); err != nil { - return 0, err - } + // Attempt to open reader with WriteFlag + f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) + if err != nil { + return 0, err } + defer func() { _ = f.Close() }() // Copy the meta pages. tx.db.metalock.Lock() n, err = io.CopyN(w, f, int64(tx.db.pageSize*2)) tx.db.metalock.Unlock() if err != nil { - _ = f.Close() return n, fmt.Errorf("meta copy: %s", err) } @@ -285,7 +309,6 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) n += wn if err != nil { - _ = f.Close() return n, err } @@ -492,7 +515,7 @@ func (tx *Tx) writeMeta() error { } // page returns a reference to the page with a given id. -// If page has been written to then a temporary bufferred page is returned. +// If page has been written to then a temporary buffered page is returned. func (tx *Tx) page(id pgid) *page { // Check the dirty pages first. if tx.pages != nil { diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml b/Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml index 44018acc059..f0e67b84abe 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/.travis.yml @@ -2,13 +2,14 @@ language: go go: - 1.3 - 1.4 + - 1.5 - tip install: - - go get -v ./... + - go get -v -t ./... - go get golang.org/x/tools/cmd/cover - go get github.com/onsi/gomega - go install github.com/onsi/ginkgo/ginkgo - export PATH=$PATH:$HOME/gopath/bin -script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --failOnPending --randomizeSuites --race +script: $HOME/gopath/bin/ginkgo -r --randomizeAllSpecs --randomizeSuites --race --trace diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md b/Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md index ccba8abe7cc..438c8c1ec46 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md @@ -3,6 +3,7 @@ Improvements: - `Skip(message)` can be used to skip the current test. 
+- Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests) Bug Fixes: diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go index 46ce16aa68d..7b22e34ad97 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go @@ -79,7 +79,7 @@ func Flags(flagSet *flag.FlagSet, prefix string, includeParallelFlags bool) { } flagSet.BoolVar(&(DefaultReporterConfig.NoColor), prefix+"noColor", false, "If set, suppress color output in default reporter.") - flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter (default: 5 seconds).") + flagSet.Float64Var(&(DefaultReporterConfig.SlowSpecThreshold), prefix+"slowSpecThreshold", 5.0, "(in seconds) Specs that take longer to run than this threshold are flagged as slow by the default reporter.") flagSet.BoolVar(&(DefaultReporterConfig.NoisyPendings), prefix+"noisyPendings", true, "If set, default reporter will shout about pending tests.") flagSet.BoolVar(&(DefaultReporterConfig.Verbose), prefix+"v", false, "If set, default reporter print out all specs as they begin.") flagSet.BoolVar(&(DefaultReporterConfig.Succinct), prefix+"succinct", false, "If set, default reporter prints out a very succinct report") diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go new file mode 100644 index 00000000000..ae8ab7d248f --- /dev/null +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table.go @@ -0,0 +1,98 @@ +/* + +Table provides a simple DSL for Ginkgo-native Table-Driven Tests + +The godoc documentation describes Table's API. More comprehensive documentation (with examples!) is available at http://onsi.github.io/ginkgo#table-driven-tests + +*/ + +package table + +import ( + "fmt" + "reflect" + + "github.com/onsi/ginkgo" +) + +/* +DescribeTable describes a table-driven test. + +For example: + + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) + +The first argument to `DescribeTable` is a string description. +The second argument is a function that will be run for each table entry. Your assertions go here - the function is equivalent to a Ginkgo It. +The subsequent arguments must be of type `TableEntry`. We recommend using the `Entry` convenience constructors. + +The `Entry` constructor takes a string description followed by an arbitrary set of parameters. These parameters are passed into your function. + +Under the hood, `DescribeTable` simply generates a new Ginkgo `Describe`. Each `Entry` is turned into an `It` within the `Describe`. + +It's important to understand that the `Describe`s and `It`s are generated at evaluation time (i.e. when Ginkgo constructs the tree of tests and before the tests run). + +Individual Entries can be focused (with FEntry) or marked pending (with PEntry or XEntry). In addition, the entire table can be focused or marked pending with FDescribeTable and PDescribeTable/XDescribeTable. 
+*/ +func DescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, false, false) + return true +} + +/* +You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. +*/ +func FDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, false, true) + return true +} + +/* +You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. +*/ +func PDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, true, false) + return true +} + +/* +You can mark a table as pending with `XDescribeTable`. This is equivalent to `XDescribe`. +*/ +func XDescribeTable(description string, itBody interface{}, entries ...TableEntry) bool { + describeTable(description, itBody, entries, true, false) + return true +} + +func describeTable(description string, itBody interface{}, entries []TableEntry, pending bool, focused bool) { + itBodyValue := reflect.ValueOf(itBody) + if itBodyValue.Kind() != reflect.Func { + panic(fmt.Sprintf("DescribeTable expects a function, got %#v", itBody)) + } + + if pending { + ginkgo.PDescribe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } else if focused { + ginkgo.FDescribe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } else { + ginkgo.Describe(description, func() { + for _, entry := range entries { + entry.generateIt(itBodyValue) + } + }) + } +} diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go new file mode 100644 index 00000000000..5fa645bceee --- /dev/null +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/extensions/table/table_entry.go @@ -0,0 +1,81 @@ +package table + +import ( + "reflect" + + "github.com/onsi/ginkgo" +) + +/* +TableEntry represents an entry in a table test. You generally use the `Entry` constructor. +*/ +type TableEntry struct { + Description string + Parameters []interface{} + Pending bool + Focused bool +} + +func (t TableEntry) generateIt(itBody reflect.Value) { + if t.Pending { + ginkgo.PIt(t.Description) + return + } + + values := []reflect.Value{} + for i, param := range t.Parameters { + var value reflect.Value + + if param == nil { + inType := itBody.Type().In(i) + value = reflect.Zero(inType) + } else { + value = reflect.ValueOf(param) + } + + values = append(values, value) + } + + body := func() { + itBody.Call(values) + } + + if t.Focused { + ginkgo.FIt(t.Description, body) + } else { + ginkgo.It(t.Description, body) + } +} + +/* +Entry constructs a TableEntry. + +The first argument is a required description (this becomes the content of the generated Ginkgo `It`). +Subsequent parameters are saved off and sent to the callback passed in to `DescribeTable`. + +Each Entry ends up generating an individual Ginkgo It. +*/ +func Entry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, false, false} +} + +/* +You can focus a particular entry with FEntry. This is equivalent to FIt. +*/ +func FEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, false, true} +} + +/* +You can mark a particular entry as pending with PEntry. This is equivalent to PIt. 
+*/ +func PEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, true, false} +} + +/* +You can mark a particular entry as pending with XEntry. This is equivalent to XIt. +*/ +func XEntry(description string, parameters ...interface{}) TableEntry { + return TableEntry{description, parameters, true, false} +} diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go index e1a2e13099a..212235bae0a 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/nodot_command.go @@ -63,6 +63,8 @@ func findSuiteFile() (string, os.FileMode) { if err != nil { complainAndQuit("Could not find suite file for nodot: " + err.Error()) } + defer f.Close() + if re.MatchReader(bufio.NewReader(f)) { return path, file.Mode() } diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go index 1135c334d6f..a1e47ba18b3 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go @@ -101,13 +101,18 @@ func (t *TestRunner) CompileTo(path string) error { } if fileExists(path) == false { - compiledFile := filepath.Join(t.Suite.Path, t.Suite. PackageName+".test") + compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test") if fileExists(compiledFile) { // seems like we are on an old go version that does not support the -o flag on go test // move the compiled test file to the desired location by hand err = os.Rename(compiledFile, path) if err != nil { - return fmt.Errorf("Failed to move compiled file: %s", err) + // We cannot move the file, perhaps because the source and destination + // are on different partitions. We can copy the file, however. + err = copyFile(compiledFile, path) + if err != nil { + return fmt.Errorf("Failed to copy compiled file: %s", err) + } } } else { return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path) @@ -124,6 +129,49 @@ func fileExists(path string) bool { return err == nil || os.IsNotExist(err) == false } +// copyFile copies the contents of the file named src to the file named +// by dst. The file will be created if it does not already exist. If the +// destination file exists, all it's contents will be replaced by the contents +// of the source file. +func copyFile(src, dst string) error { + srcInfo, err := os.Stat(src) + if err != nil { + return err + } + mode := srcInfo.Mode() + + in, err := os.Open(src) + if err != nil { + return err + } + + defer in.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + + defer func() { + closeErr := out.Close() + if err == nil { + err = closeErr + } + }() + + _, err = io.Copy(out, in) + if err != nil { + return err + } + + err = out.Sync() + if err != nil { + return err + } + + return out.Chmod(mode) +} + /* go test -c -i spits package.test out into the cwd. there's no way to change this. 
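For context, the fallback that CompileTo now performs can be sketched in isolation: attempt a cheap os.Rename first, and only if that fails (for example because the source and destination sit on different partitions) copy the bytes and the file mode, which is what the new copyFile helper does. The moveOrCopy function and the paths below are illustrative only and are not part of the vendored code.

    package main

    import (
    	"fmt"
    	"io"
    	"os"
    )

    // moveOrCopy attempts a rename and, failing that, copies the file
    // contents and mode to the destination, mirroring CompileTo's fallback.
    func moveOrCopy(src, dst string) error {
    	if err := os.Rename(src, dst); err == nil {
    		return nil
    	}
    	info, err := os.Stat(src)
    	if err != nil {
    		return err
    	}
    	in, err := os.Open(src)
    	if err != nil {
    		return err
    	}
    	defer in.Close()
    	out, err := os.Create(dst)
    	if err != nil {
    		return err
    	}
    	defer out.Close()
    	if _, err := io.Copy(out, in); err != nil {
    		return err
    	}
    	if err := out.Sync(); err != nil {
    		return err
    	}
    	return out.Chmod(info.Mode())
    }

    func main() {
    	// Hypothetical paths, chosen only to exercise the helper.
    	if err := moveOrCopy("ginkgo.test", "/tmp/ginkgo.test"); err != nil {
    		fmt.Println("move failed:", err)
    	}
    }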
diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go index cc7d2f45393..4ef3bc44ff1 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testsuite/test_suite.go @@ -4,6 +4,7 @@ import ( "errors" "io/ioutil" "os" + "path" "path/filepath" "regexp" "strings" @@ -47,6 +48,15 @@ func PrecompiledTestSuite(path string) (TestSuite, error) { func SuitesInDir(dir string, recurse bool) []TestSuite { suites := []TestSuite{} + + // "This change will only be enabled if the go command is run with + // GO15VENDOREXPERIMENT=1 in its environment." + // c.f. the vendor-experiment proposal https://goo.gl/2ucMeC + vendorExperiment := os.Getenv("GO15VENDOREXPERIMENT") + if (vendorExperiment == "1") && path.Base(dir) == "vendor" { + return suites + } + files, _ := ioutil.ReadDir(dir) re := regexp.MustCompile(`_test\.go$`) for _, file := range files { diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go index 16f3c3b72e3..683c3a9982d 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/unfocus_command.go @@ -24,6 +24,8 @@ func unfocusSpecs([]string, []string) { unfocus("Context") unfocus("It") unfocus("Measure") + unfocus("DescribeTable") + unfocus("Entry") } func unfocus(component string) { diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go index 97361224833..03ea0125877 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go @@ -80,7 +80,7 @@ func (w *SpecWatcher) WatchSuites(args []string, additionalArgs []string) { } for suite, err := range errors { - fmt.Printf("Failed to watch %s: %s\n"+suite.PackageName, err) + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) } if len(suites) == 1 { diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go index 36f6d8e773f..1d8e593052a 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -347,6 +347,28 @@ func XIt(text string, _ ...interface{}) bool { return true } +//Specify blocks are aliases for It blocks and allow for more natural wording in situations +//which "It" does not fit into a natural sentence flow. All the same protocols apply for Specify blocks +//which apply to It blocks. +func Specify(text string, body interface{}, timeout ...float64) bool { + return It(text, body, timeout...) +} + +//You can focus individual Specifys using FSpecify +func FSpecify(text string, body interface{}, timeout ...float64) bool { + return FIt(text, body, timeout...) +} + +//You can mark Specifys as pending using PSpecify +func PSpecify(text string, is ...interface{}) bool { + return PIt(text, is...) +} + +//You can mark Specifys as pending using XSpecify +func XSpecify(text string, is ...interface{}) bool { + return XIt(text, is...) +} + //By allows you to better document large Its. // //Generally you should try to keep your Its short and to the point. 
This is not always possible, however, diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go index 003f8516099..870ad826da0 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/leafnodes/runner.go @@ -68,7 +68,7 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) done := make(chan interface{}, 1) go func() { - finished := false + finished := false defer func() { if e := recover(); e != nil || !finished { @@ -83,7 +83,7 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) }() r.asyncFunc(done) - finished = true + finished = true }() select { @@ -96,7 +96,7 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure) return } func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) { - finished := false + finished := false defer func() { if e := recover(); e != nil || !finished { @@ -107,7 +107,7 @@ func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) }() r.syncFunc() - finished = true + finished = true return } diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go index 181b227e62e..1235ad00cbb 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go @@ -6,7 +6,6 @@ import ( "errors" "io/ioutil" "os" - "syscall" ) func NewOutputInterceptor() OutputInterceptor { @@ -31,8 +30,12 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error { return err } - syscall.Dup2(int(interceptor.redirectFile.Fd()), 1) - syscall.Dup2(int(interceptor.redirectFile.Fd()), 2) + // Call a function in ./syscall_dup_*.go + // If building for everything other than linux_arm64, + // use a "normal" syscall.Dup2(oldfd, newfd) call. If building for linux_arm64 (which doesn't have syscall.Dup2) + // call syscall.Dup3(oldfd, newfd, 0). 
They are nearly identical, see: http://linux.die.net/man/2/dup3 + syscallDup(int(interceptor.redirectFile.Fd()), 1) + syscallDup(int(interceptor.redirectFile.Fd()), 2) return nil } diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go new file mode 100644 index 00000000000..9550d37b36b --- /dev/null +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/syscall_dup_linux_arm64.go @@ -0,0 +1,11 @@ +// +build linux,arm64 + +package remote + +import "syscall" + +// linux_arm64 doesn't have syscall.Dup2 which ginkgo uses, so +// use the nearly identical syscall.Dup3 instead +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup3(oldfd, newfd, 0) +} diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go new file mode 100644 index 00000000000..e7fad5bbb03 --- /dev/null +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/syscall_dup_unix.go @@ -0,0 +1,10 @@ +// +build !linux !arm64 +// +build !windows + +package remote + +import "syscall" + +func syscallDup(oldfd int, newfd int) (err error) { + return syscall.Dup2(oldfd, newfd) +} diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go index 8d1a438df7f..5b5d905da16 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -8,6 +8,7 @@ package stenographer import ( "fmt" + "runtime" "strings" "github.com/onsi/ginkgo/types" @@ -59,14 +60,20 @@ type Stenographer interface { } func New(color bool) Stenographer { + denoter := "•" + if runtime.GOOS == "windows" { + denoter = "+" + } return &consoleStenographer{ color: color, + denoter: denoter, cursorState: cursorStateTop, } } type consoleStenographer struct { color bool + denoter string cursorState cursorStateType } @@ -197,7 +204,7 @@ func (s *consoleStenographer) announceSetupFailure(name string, summary *types.S s.println(0, s.colorize(redColor+boldStyle, "%s [%.3f seconds]", message, summary.RunTime.Seconds())) - indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, true, true) + indentation := s.printCodeLocationBlock([]string{name}, []types.CodeLocation{summary.CodeLocation}, summary.ComponentType, 0, summary.State, true) s.printNewLine() s.printFailure(indentation, summary.State, summary.Failure, fullTrace) @@ -216,13 +223,13 @@ func (s *consoleStenographer) AnnounceCapturedOutput(output string) { } func (s *consoleStenographer) AnnounceSuccesfulSpec(spec *types.SpecSummary) { - s.print(0, s.colorize(greenColor, "•")) + s.print(0, s.colorize(greenColor, s.denoter)) s.stream() } func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, succinct bool) { s.printBlockWithMessage( - s.colorize(greenColor, "• [SLOW TEST:%.3f seconds]", spec.RunTime.Seconds()), + s.colorize(greenColor, "%s [SLOW TEST:%.3f seconds]", s.denoter, spec.RunTime.Seconds()), "", spec, succinct, @@ -231,7 +238,7 @@ func (s *consoleStenographer) AnnounceSuccesfulSlowSpec(spec *types.SpecSummary, func (s *consoleStenographer) AnnounceSuccesfulMeasurement(spec *types.SpecSummary, 
succinct bool) { s.printBlockWithMessage( - s.colorize(greenColor, "• [MEASUREMENT]"), + s.colorize(greenColor, "%s [MEASUREMENT]", s.denoter), s.measurementReport(spec, succinct), spec, succinct, @@ -261,24 +268,24 @@ func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succi s.startBlock() s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, true, succinct) + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) s.printNewLine() - s.printFailure(indentation, spec.State, spec.Failure, fullTrace) + s.printSkip(indentation, spec.Failure) s.endBlock() } } func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure("•... Timeout", spec, succinct, fullTrace) + s.printSpecFailure(fmt.Sprintf("%s... Timeout", s.denoter), spec, succinct, fullTrace) } func (s *consoleStenographer) AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure("•! Panic", spec, succinct, fullTrace) + s.printSpecFailure(fmt.Sprintf("%s! Panic", s.denoter), spec, succinct, fullTrace) } func (s *consoleStenographer) AnnounceSpecFailed(spec *types.SpecSummary, succinct bool, fullTrace bool) { - s.printSpecFailure("• Failure", spec, succinct, fullTrace) + s.printSpecFailure(fmt.Sprintf("%s Failure", s.denoter), spec, succinct, fullTrace) } func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) { @@ -311,7 +318,7 @@ func (s *consoleStenographer) SummarizeFailures(summaries []*types.SpecSummary) } else if summary.Failed() { s.print(0, s.colorize(redColor+boldStyle, "[Fail] ")) } - s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, true, true) + s.printSpecContext(summary.ComponentTexts, summary.ComponentCodeLocations, summary.Failure.ComponentType, summary.Failure.ComponentIndex, summary.State, true) s.printNewLine() s.println(0, s.colorize(lightGrayColor, summary.Failure.Location.String())) } @@ -344,7 +351,7 @@ func (s *consoleStenographer) printBlockWithMessage(header string, message strin s.startBlock() s.println(0, header) - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, false, succinct) + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, types.SpecComponentTypeInvalid, 0, spec.State, succinct) if message != "" { s.printNewLine() @@ -358,7 +365,7 @@ func (s *consoleStenographer) printSpecFailure(message string, spec *types.SpecS s.startBlock() s.println(0, s.colorize(redColor+boldStyle, "%s%s [%.3f seconds]", message, s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds())) - indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, true, succinct) + indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, spec.State, succinct) s.printNewLine() s.printFailure(indentation, spec.State, spec.Failure, fullTrace) @@ -382,6 
+389,12 @@ func (s *consoleStenographer) failureContext(failedComponentType types.SpecCompo return "" } +func (s *consoleStenographer) printSkip(indentation int, spec types.SpecFailure) { + s.println(indentation, s.colorize(cyanColor, spec.Message)) + s.printNewLine() + s.println(indentation, spec.Location.String()) +} + func (s *consoleStenographer) printFailure(indentation int, state types.SpecState, failure types.SpecFailure, fullTrace bool) { if state == types.SpecStatePanicked { s.println(indentation, s.colorize(redColor+boldStyle, failure.Message)) @@ -402,7 +415,7 @@ func (s *consoleStenographer) printFailure(indentation int, state types.SpecStat } } -func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, failure bool, succinct bool) int { +func (s *consoleStenographer) printSpecContext(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { startIndex := 1 indentation := 0 @@ -411,7 +424,11 @@ func (s *consoleStenographer) printSpecContext(componentTexts []string, componen } for i := startIndex; i < len(componentTexts); i++ { - if failure && i == failedComponentIndex { + if (state.IsFailure() || state == types.SpecStateSkipped) && i == failedComponentIndex { + color := redColor + if state == types.SpecStateSkipped { + color = cyanColor + } blockType := "" switch failedComponentType { case types.SpecComponentTypeBeforeSuite: @@ -430,9 +447,9 @@ func (s *consoleStenographer) printSpecContext(componentTexts []string, componen blockType = "Measurement" } if succinct { - s.print(0, s.colorize(redColor+boldStyle, "[%s] %s ", blockType, componentTexts[i])) + s.print(0, s.colorize(color+boldStyle, "[%s] %s ", blockType, componentTexts[i])) } else { - s.println(indentation, s.colorize(redColor+boldStyle, "%s [%s]", componentTexts[i], blockType)) + s.println(indentation, s.colorize(color+boldStyle, "%s [%s]", componentTexts[i], blockType)) s.println(indentation, s.colorize(grayColor, "%s", componentCodeLocations[i])) } } else { @@ -449,8 +466,8 @@ func (s *consoleStenographer) printSpecContext(componentTexts []string, componen return indentation } -func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, failure bool, succinct bool) int { - indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, failure, succinct) +func (s *consoleStenographer) printCodeLocationBlock(componentTexts []string, componentCodeLocations []types.CodeLocation, failedComponentType types.SpecComponentType, failedComponentIndex int, state types.SpecState, succinct bool) int { + indentation := s.printSpecContext(componentTexts, componentCodeLocations, failedComponentType, failedComponentIndex, state, succinct) if succinct { if len(componentTexts) > 0 { diff --git a/Godeps/_workspace/src/github.com/onsi/ginkgo/types/types.go b/Godeps/_workspace/src/github.com/onsi/ginkgo/types/types.go index 583b3473959..889612e0a72 100644 --- a/Godeps/_workspace/src/github.com/onsi/ginkgo/types/types.go +++ b/Godeps/_workspace/src/github.com/onsi/ginkgo/types/types.go @@ -35,7 +35,7 @@ type SpecSummary struct { } func (s SpecSummary) HasFailureState() bool { - return s.State == 
SpecStateTimedOut || s.State == SpecStatePanicked || s.State == SpecStateFailed + return s.State.IsFailure() } func (s SpecSummary) TimedOut() bool { @@ -115,6 +115,10 @@ const ( SpecStateTimedOut ) +func (state SpecState) IsFailure() bool { + return state == SpecStateTimedOut || state == SpecStatePanicked || state == SpecStateFailed +} + type SpecComponentType uint const (
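The final hunk reads more easily with the new helper spelled out: SpecState.IsFailure folds the timed-out, panicked, and failed states into one predicate, which both SpecSummary.HasFailureState and the stenographer's coloring logic now call. Below is a minimal standalone sketch; the state constants are re-declared here purely for illustration and their ordering is not meant to match ginkgo's internal values, whose real definitions live in the types package.

    package main

    import "fmt"

    // SpecState re-declared for illustration only.
    type SpecState uint

    const (
    	SpecStateInvalid SpecState = iota
    	SpecStatePending
    	SpecStateSkipped
    	SpecStatePassed
    	SpecStateFailed
    	SpecStatePanicked
    	SpecStateTimedOut
    )

    // IsFailure mirrors the helper added in types.go: a spec counts as failed
    // if it timed out, panicked, or failed outright.
    func (s SpecState) IsFailure() bool {
    	return s == SpecStateTimedOut || s == SpecStatePanicked || s == SpecStateFailed
    }

    func main() {
    	for _, s := range []SpecState{SpecStatePassed, SpecStateSkipped, SpecStateTimedOut} {
    		fmt.Printf("state=%d failure=%v\n", s, s.IsFailure())
    	}
    }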